• Kudu Native RDD


    Spark与Kudu的集成同时提供了kudu RDD

    import org.apache.kudu.spark.kudu.KuduContext
    import org.apache.spark.{SparkConf, SparkContext}
    import org.apache.spark.sql.{Row, SparkSession}
    
    /**
      * Created by angel;
      */
    /**
      * Example: reading an Apache Kudu table as a native Spark RDD.
      *
      * Connects to a Kudu cluster, reads a projection of a table via
      * `KuduContext.kuduRDD`, converts the resulting `RDD[Row]` to tuples,
      * and prints them on the driver.
      *
      * Created by angel.
      */
    object KuduNativeRDD {
      def main(args: Array[String]): Unit = {
        val sparkConf = new SparkConf().setAppName("AcctfileProcess")
          // Run locally; the remaining settings just relax timeouts for the demo.
          .setMaster("local")
          .set("spark.worker.timeout", "500")
          .set("spark.cores.max", "10")
          .set("spark.rpc.askTimeout", "600s")
          .set("spark.network.timeout", "600s")
          .set("spark.task.maxFailures", "1")
          // FIX: the original key was the typo "spark.speculationfalse"
          // (the value fused into the key), so speculation was never configured.
          .set("spark.speculation", "false")
          .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
        // Build a single SparkSession and reuse its SparkContext everywhere.
        // The original created a second SparkContext and needed the
        // spark.driver.allowMultipleContexts=true workaround; one session avoids that.
        val spark = SparkSession.builder().config(sparkConf).getOrCreate()
        val sparkContext = spark.sparkContext
        val sqlContext = spark.sqlContext
        // Kudu master addresses (comma-separated host:port list).
        val kuduMasters = "hadoop01:7051,hadoop02:7051,hadoop03:7051"
        val kuduContext = new KuduContext(kuduMasters, sparkContext)
        // 1: the Kudu table to read.
        val kuduTableName = "spark_kudu_tbl"
        // 2: projection — fetch only the columns we need.
        val kuduTableProjColumns = Seq("name", "age")

        // 3: read the table as an RDD[Row].
        val custRDD = kuduContext.kuduRDD(sparkContext, kuduTableName, kuduTableProjColumns)

        // 4: convert each Row to a (name, age) tuple.
        // NOTE(review): assumes every row matches (String, Int); a row with a
        // different shape (e.g. a null age) would throw a MatchError — confirm
        // the table schema before relying on this.
        val custTuple = custRDD.map {
          case Row(name: String, age: Int) => (name, age)
        }
        // 5: collect to the driver and print.
        custTuple.collect().foreach(println)
      }
    }
  • 相关阅读:
    编译nginx
    MVPN技术原理
    python中_, __, __foo__区别及使用场景
    https双向认证(python)
    http keepalive test code(python)
    压缩 KVM 的 qcow2 镜像文件
    nohup python程序,print无输出
    Less(51)
    Less(50)
    Less(49)
  • 原文地址:https://www.cnblogs.com/niutao/p/10555410.html
Copyright © 2020-2023  润新知