• java实现spark常用算子之ReduceByKey


    import org.apache.spark.SparkConf;
    import org.apache.spark.api.java.JavaPairRDD;
    import org.apache.spark.api.java.JavaSparkContext;
    import org.apache.spark.api.java.function.Function2;
    import org.apache.spark.api.java.function.VoidFunction;
    import scala.Tuple2;

    import java.util.Arrays;
    import java.util.List;

    /**
     * Demonstrates Spark's {@code reduceByKey(func, [numTasks])} operator.
     *
     * <p>Values are grouped by key and then combined pairwise with {@code func};
     * the optional second argument sets the number of reduce tasks (parallelism).
     * Conceptually {@code reduceByKey = groupByKey + reduce}, but with map-side
     * combining, so it shuffles less data than the groupByKey form.
     */
    public class ReduceByKeyOperator {
        public static void main(String[] args) {
            SparkConf conf = new SparkConf().setMaster("local").setAppName("reduceByKey");
            // try-with-resources: JavaSparkContext implements Closeable, so the
            // context is always stopped, even if the job throws. The original
            // version leaked it by never calling close()/stop().
            try (JavaSparkContext sc = new JavaSparkContext(conf)) {
                List<Tuple2<String, Integer>> list = Arrays.asList(
                        new Tuple2<>("w1", 1),
                        new Tuple2<>("w2", 2),
                        new Tuple2<>("w3", 3),
                        new Tuple2<>("w2", 22),
                        new Tuple2<>("w1", 11)
                );

                JavaPairRDD<String, Integer> pairRdd = sc.parallelizePairs(list);

                // Sum all values sharing a key; 2 = number of reduce partitions.
                // Expected result: w1 -> 12, w2 -> 24, w3 -> 3 (order not guaranteed).
                JavaPairRDD<String, Integer> result = pairRdd.reduceByKey(new Function2<Integer, Integer, Integer>() {
                    @Override
                    public Integer call(Integer a, Integer b) throws Exception {
                        return a + b;
                    }
                }, 2);

                // Print to stdout — the conventional stream for normal program
                // output (the original wrote to System.err).
                result.foreach(new VoidFunction<Tuple2<String, Integer>>() {
                    @Override
                    public void call(Tuple2<String, Integer> pair) throws Exception {
                        System.out.println(pair._1 + ":" + pair._2);
                    }
                });
            }
        }
    }

    微信扫描下图二维码加入博主知识星球,获取更多大数据、人工智能、算法等免费学习资料哦!

  • 相关阅读:
    自己遇到的冲突及解决方案
    怎么解决代码冲突及切换分支
    程序员修养
    代码回退
    gitlab两种连接方式:ssh和http配置介绍
    gitlab创建项目及分支
    github,gitlab的区别
    代码托管有什么用
    新手搭建云服务器详细过程
    UNP学习笔记(第十一章 名字与地址转换)
  • 原文地址:https://www.cnblogs.com/guokai870510826/p/11635177.html
Copyright © 2020-2023  润新知