1 Reading a local file
In this example, wordfile1.txt holds two lines of text, "I love Spark" and "I love Hadoop" (the word counts in section 3 confirm this). Start the Spark shell:
./spark-shell
scala> val textFile=sc.textFile("file:///home/hadoop/wordfile1.txt")
textFile: org.apache.spark.rdd.RDD[String] = file:///home/hadoop/wordfile1.txt MapPartitionsRDD[3] at textFile at <console>:24
scala> textFile.first()
res2: String = I love Spark
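Note that textFile() is lazy: nothing is read from disk until an action such as first() or count() runs, so a mistyped path only fails at that point. A quick sketch (the path below is deliberately nonexistent):
scala> val bad = sc.textFile("file:///no/such/file.txt")   // returns an RDD immediately; the file is not checked yet
scala> bad.count()   // only now does Spark try to read the file; this call throws because the path does not exist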
2 Reading an HDFS file
First start HDFS and upload the file to it; only then can the commands below read it.
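For example (the local path matches the file used in section 1, and /user/hadoop is the HDFS home directory of the hadoop user):
start-dfs.sh
hdfs dfs -mkdir -p /user/hadoop/input
hdfs dfs -put /home/hadoop/wordfile1.txt /user/hadoop/input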
scala> val textFile=sc.textFile("hdfs://localhost:9000/user/hadoop/input/wordfile1.txt")
textFile: org.apache.spark.rdd.RDD[String] = hdfs://localhost:9000/user/hadoop/input/wordfile1.txt MapPartitionsRDD[7] at textFile at <console>:24
scala> textFile.first()
res4: String = I love Spark
scala> val textFile=sc.textFile("input/wordfile1.txt")
textFile: org.apache.spark.rdd.RDD[String] = input/wordfile1.txt MapPartitionsRDD[9] at textFile at <console>:24
scala> textFile.first()
res5: String = I love Spark
scala> val textFile=sc.textFile("/user/hadoop/input/wordfile1.txt")
textFile: org.apache.spark.rdd.RDD[String] = /user/hadoop/input/wordfile1.txt MapPartitionsRDD[11] at textFile at <console>:24
scala> textFile.count()
res6: Long = 2
scala> textFile.first()
res8: String = I love Spark
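Since the file is tiny, you can also print it in full with collect() (the expected output, shown as comments, follows from the file contents described in section 1):
scala> textFile.collect().foreach(println)
// I love Spark
// I love Hadoop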
3 Word count
flatMap splits each line into words, map turns each word into a (word, 1) pair, and reduceByKey adds up the counts for every distinct word:
scala> val wordCount=textFile.flatMap(line=>line.split(" ")).map(word=>(word,1)).reduceByKey((a,b)=>(a+b))
wordCount: org.apache.spark.rdd.RDD[(String, Int)] = ShuffledRDD[14] at reduceByKey at <console>:26
scala> wordCount.collect()
res9: Array[(String, Int)] = Array((Spark,1), (love,2), (I,2), (Hadoop,1))
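collect() pulls the whole result to the driver, which is fine for data this small. To rank or persist the counts, the standard RDD methods sortBy and saveAsTextFile can be chained on (the output path below is an assumption, not from the original session):
scala> wordCount.sortBy(_._2, ascending=false).collect()   // same pairs, ordered by count, highest first
scala> wordCount.saveAsTextFile("hdfs://localhost:9000/user/hadoop/output")   // writes one part file per partition; fails if the directory already exists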