Running a Hadoop example for the first time, I hit quite a few detours along the way, so I'm recording the steps here:
Step 1: Create a Maven project and add this to its pom.xml (the project is packaged as a jar):
<dependency>
  <groupId>org.apache.hadoop</groupId>
  <artifactId>hadoop-client</artifactId>
  <version>2.7.0</version>
</dependency>
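For reference, a minimal pom.xml wrapping this dependency might look like the sketch below; the project's own groupId/artifactId/version are placeholders I chose to line up with the wc.jar used in step 6, not values from the original project:

<project xmlns="http://maven.apache.org/POM/4.0.0">
  <modelVersion>4.0.0</modelVersion>
  <!-- placeholder coordinates, not from the original project -->
  <groupId>com.nonobank.hadoop</groupId>
  <artifactId>wc</artifactId>
  <version>1.0</version>
  <packaging>jar</packaging>
  <dependencies>
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-client</artifactId>
      <version>2.7.0</version>
    </dependency>
  </dependencies>
</project>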
Step 2: Create a WordCount class (copied from the official site):
package com.nonobank.hadoop; // added so the fully qualified name matches the run command in step 6

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WordCount {

  // Mapper: splits each input line into whitespace-separated tokens and emits (word, 1).
  public static class TokenizerMapper
      extends Mapper<Object, Text, Text, IntWritable> {

    private final static IntWritable one = new IntWritable(1);
    private Text word = new Text();

    public void map(Object key, Text value, Context context)
        throws IOException, InterruptedException {
      StringTokenizer itr = new StringTokenizer(value.toString());
      while (itr.hasMoreTokens()) {
        word.set(itr.nextToken());
        context.write(word, one);
      }
    }
  }

  // Reducer (also used as the combiner): sums the counts for each word.
  public static class IntSumReducer
      extends Reducer<Text, IntWritable, Text, IntWritable> {

    private IntWritable result = new IntWritable();

    public void reduce(Text key, Iterable<IntWritable> values, Context context)
        throws IOException, InterruptedException {
      int sum = 0;
      for (IntWritable val : values) {
        sum += val.get();
      }
      result.set(sum);
      context.write(key, result);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Job job = Job.getInstance(conf, "word count");
    job.setJarByClass(WordCount.class);
    job.setMapperClass(TokenizerMapper.class);
    job.setCombinerClass(IntSumReducer.class);
    job.setReducerClass(IntSumReducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);
    FileInputFormat.addInputPath(job, new Path(args[0]));   // input directory
    FileOutputFormat.setOutputPath(job, new Path(args[1])); // output directory (must not exist yet)
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}
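As a concrete illustration of what the job computes: the file sizes in the listing in step 5 (22 and 28 bytes) match the sample inputs from the official tutorial, so assuming file01 contains "Hello World Bye World" and file02 contains "Hello Hadoop Goodbye Hadoop", the job would output:

Bye 1
Goodbye 1
Hadoop 2
Hello 2
World 2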
Step 3: Build the jar:
mvn clean install
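If the build succeeds, Maven writes the jar into target/; with the placeholder coordinates in the pom sketch above that would be target/wc-1.0.jar, which can be copied to the wc.jar name used in step 6:

cp target/wc-1.0.jar wc.jar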
Step 4: Copy the jar to the master machine of the Hadoop cluster.
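For example, with scp (the user, host, and destination path here are illustrative, not from the original setup):

scp wc.jar localadmin@master:/home/localadmin/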
Step 5: Set up the HDFS input directory.
Configure core-site.xml under the hadoop-2.6.0/etc/hadoop directory:
<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://master:9000/</value>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>file:/home/localadmin/filedata</value>
  </property>
</configuration>
The fs.defaultFS value above shows the HDFS root; you can also list it with the command:
bin/hadoop fs -ls /
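You can also query the client configuration directly to confirm which filesystem URI it resolves (Hadoop 2.x):

bin/hdfs getconf -confKey fs.defaultFS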
Set up the input directory:
Under /home/localadmin, create the filedata/infile directory and create two files in it, file01 and file02. Then create the HDFS input directory and upload the files:
bin/hadoop fs -mkdir -p /home/localadmin/filedata/input
bin/hadoop fs -put /home/localadmin/filedata/infile/file01 /home/localadmin/filedata/input
bin/hadoop fs -put /home/localadmin/filedata/infile/file02 /home/localadmin/filedata/input
Check that the files arrived:
# bin/hadoop fs -ls /home/localadmin/filedata/input
Found 2 items
-rw-r--r-- 3 root supergroup 22 2015-12-25 13:56 /home/localadmin/filedata/input/file01
-rw-r--r-- 3 root supergroup 28 2015-12-25 13:56 /home/localadmin/filedata/input/file02
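To double-check the uploaded contents themselves:

bin/hadoop fs -cat /home/localadmin/filedata/input/file01
bin/hadoop fs -cat /home/localadmin/filedata/input/file02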
Note: do not create the output directory in advance.
Because a Hadoop job is an expensive computation, its results cannot be overwritten by default,
so the output directory must not exist when the job starts; otherwise the job fails with an "output directory already exists" error (FileAlreadyExistsException).
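If a previous run left the output directory behind, remove it before re-running:

bin/hadoop fs -rm -r /home/localadmin/filedata/output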
Step 6: Run the job:
hadoop jar wc.jar com.nonobank.hadoop.WordCount /home/localadmin/filedata/input /home/localadmin/filedata/output
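When the job completes, the counts are written to part files in the output directory; with a single reducer the file is part-r-00000. To view the result:

bin/hadoop fs -cat /home/localadmin/filedata/output/part-r-00000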
References:
[1] http://blog.sina.com.cn/s/blog_757dbe670101gnj9.html
[2] https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html#Example:_WordCount_v1.0
[3] http://blog.itpub.net/26230597/viewspace-1370205/