• Hadoop's most basic wordcount (word-frequency counting)


    package com.uniclick.dapa.dstest;
    
    import java.io.IOException;
    import java.net.URI;
    
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.Mapper;
    import org.apache.hadoop.mapreduce.Reducer;
    import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
    import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
    
    public class WordCount {
    	public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
    		String inputFilePath = "/user/zhouyuanlong/wordcount/input/wordTest*.txt";
    		String outputFilePath = "/user/zhouyuanlong/wordcount/output/";
    		String queue = "default";
    		String jobName = "wordCount";
    		if(args == null || args.length < 2){
    			System.out.println("[-INPUT <inputFilePath>"
    					+ "[-OUTPUT <outputFilePath>");
    		}else{
    			for(int i=0;i<args.length;i++){
    				if("-Q".equals(args[i])){
    					queue = args[++i];
    				}
    			}
    		}
    		Configuration conf = new Configuration();
    		// legacy property name; Hadoop 2.x+ also accepts mapreduce.job.queuename
    		conf.set("mapred.job.queue.name", queue);
    		Job job = Job.getInstance(conf, jobName); // new Job(conf, name) is deprecated
    		job.setJarByClass(WordCount.class);
    		job.setMapperClass(WordCountMapper.class);
    //		job.setCombinerClass(WordCountReducer.class); // optional; see the note after the listing
    		job.setReducerClass(WordCountReducer.class);
    		job.setOutputKeyClass(Text.class);
    		job.setOutputValueClass(IntWritable.class);
    		FileInputFormat.addInputPath(job, new Path(inputFilePath));
    		Path path = new Path(outputFilePath);
    		FileSystem fs = FileSystem.get(URI.create(outputFilePath), conf);
    		if(fs.exists(path)){
    			// recursively delete any previous output so the job can be rerun
    			fs.delete(path, true);
    		}
    		FileOutputFormat.setOutputPath(job, path);
    		// exit with 0 on success, 1 on failure
    		System.exit(job.waitForCompletion(true) ? 0 : 1);
    	}
    	
    	public static class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable>{
    		private Text kt = new Text(); // reused output key: the word
    		private final static IntWritable vt = new IntWritable(1); // constant count of 1
    
    		@Override
    		public void map(LongWritable key, Text value, Context context)
    				throws IOException, InterruptedException {
    			// input lines are tab-separated; split on an explicit \t escape
    			String[] arr = value.toString().split("\t");
    			for(int i = 0; i < arr.length; i++){
    				kt.set(arr[i]);
    				context.write(kt, vt);
    			}
    		}
    	}
    	
    	public static class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable>{
    		private IntWritable vt = new IntWritable(); // reused output value: the total count
    		
    		@Override
    		public void reduce(Text key, Iterable<IntWritable> values, Context context)
    				throws IOException, InterruptedException{
    			int sum = 0;
    			for(IntWritable intVal : values){
    				sum += intVal.get();
    			}
    			vt.set(sum);
    			context.write(key, vt);
    		}
    	}
    	
    }
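
    A note on the commented-out setCombinerClass line: WordCountReducer only sums integers, and integer addition is associative and commutative, so the same class can double as a combiner, pre-aggregating counts on the map side to cut shuffle traffic. A minimal sketch (one line added to the driver above):

        job.setCombinerClass(WordCountReducer.class);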
    


    Contents of wordTest1.txt in the input directory (fields on each line are tab-separated):

    hello    world
    hello    hadoop
    hello    mapreduce


    Contents of wordTest2.txt in the input directory (fields on each line are tab-separated):

    hello    world
    hello    hadoop
    hello    mapreduce

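    One way to run the job and inspect the result from the command line (the jar name wordcount.jar is an assumption; substitute your actual build artifact):

        hadoop jar wordcount.jar com.uniclick.dapa.dstest.WordCount -Q default
        hadoop fs -cat /user/zhouyuanlong/wordcount/output/part-r-*
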
    Output in HDFS (assuming only the two files above match the wordTest*.txt input glob):

    hadoop  2
    hello   6
    mapreduce       2
    world   2


    PS: this is a slightly modified version of the wordcount example that ships with Hadoop.
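
    For a quick local check of the mapper without a cluster, something like the following MRUnit-based test can be used (a sketch assuming Apache MRUnit is on the classpath; WordCountMapperTest is a hypothetical class, not part of the post):

        import org.apache.hadoop.io.IntWritable;
        import org.apache.hadoop.io.LongWritable;
        import org.apache.hadoop.io.Text;
        import org.apache.hadoop.mrunit.mapreduce.MapDriver;

        public class WordCountMapperTest {
            public static void main(String[] args) throws Exception {
                // feed one tab-separated line through the mapper and assert the emitted pairs
                MapDriver<LongWritable, Text, Text, IntWritable> driver =
                        MapDriver.newMapDriver(new WordCount.WordCountMapper());
                driver.withInput(new LongWritable(0), new Text("hello\tworld"))
                      .withOutput(new Text("hello"), new IntWritable(1))
                      .withOutput(new Text("world"), new IntWritable(1))
                      .runTest();
            }
        }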

• Original post: https://www.cnblogs.com/snake-hand/p/3178012.html