• The most basic Hadoop WordCount (word-frequency count)


    package com.uniclick.dapa.dstest;
    
    import java.io.IOException;
    import java.net.URI;
    
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.Mapper;
    import org.apache.hadoop.mapreduce.Reducer;
    import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
    import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
    
    public class WordCount {
    	public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
    		String inputFilePath = "/user/zhouyuanlong/wordcount/input/wordTest*.txt";
    		String outputFilePath = "/user/zhouyuanlong/wordcount/output/";
    		String queue = "default";
    		String jobName = "wordCount";
    		if (args == null || args.length < 2) {
    			System.out.println("usage: [-INPUT <inputFilePath>]"
    					+ " [-OUTPUT <outputFilePath>] [-Q <queue>]");
    		} else {
    			for (int i = 0; i < args.length; i++) {
    				if ("-INPUT".equals(args[i])) {
    					inputFilePath = args[++i];
    				} else if ("-OUTPUT".equals(args[i])) {
    					outputFilePath = args[++i];
    				} else if ("-Q".equals(args[i])) {
    					queue = args[++i];
    				}
    			}
    		}
    		Configuration conf = new Configuration();
    		// mapred.job.queue.name is the pre-Hadoop-2 property name; newer
    		// releases deprecate it in favor of mapreduce.job.queuename
    		conf.set("mapred.job.queue.name", queue);
    		Job job = new Job(conf, jobName); // on Hadoop 2+, prefer Job.getInstance(conf, jobName)
    		job.setJarByClass(WordCount.class);
    		job.setMapperClass(WordCountMapper.class);
    //		job.setCombinerClass(WordCountReducer.class);	// optional map-side combine; see the note after the listing
    		job.setReducerClass(WordCountReducer.class);
    		job.setOutputKeyClass(Text.class);
    		job.setOutputValueClass(IntWritable.class);
    		FileInputFormat.addInputPath(job, new Path(inputFilePath));
    		Path path = new Path(outputFilePath);
    		FileSystem fs = FileSystem.get(URI.create(outputFilePath), conf);
    		if (fs.exists(path)) {
    			// delete a pre-existing output directory (recursively),
    			// otherwise the job fails at submission time
    			fs.delete(path, true);
    		}
    		FileOutputFormat.setOutputPath(job, path);
    		// exit 0 on success, non-zero on failure
    		System.exit(job.waitForCompletion(true) ? 0 : 1);
    	}
    	
    	public static class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable>{
    		private Text kt = new Text();	// reused output key
    		private final static IntWritable vt = new IntWritable(1);	// constant 1 per word occurrence
    
    		@Override
    		public void map(LongWritable key, Text value, Context context)
    				throws IOException, InterruptedException {
    			// each input line holds tab-separated words; emit (word, 1) per token
    			String[] arr = value.toString().split("\t");
    			for(int i = 0; i < arr.length; i++){
    				kt.set(arr[i]);
    				context.write(kt, vt);
    			}
    		}
    	}
    	
    	public static class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable>{
    		private IntWritable vt = new IntWritable();	// reused output value
    		
    		@Override
    		public void reduce(Text key, Iterable<IntWritable> values, Context context)
    				throws IOException, InterruptedException {
    			// sum the partial counts emitted for this word
    			int sum = 0;
    			for(IntWritable intVal : values){
    				sum += intVal.get();
    			}
    			vt.set(sum);
    			context.write(key, vt);
    		}
    	}
    	
    }
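
The commented-out setCombinerClass call in the driver deserves a note: this job's reduce function is a plain integer sum, which is associative and commutative, so the reducer class can safely double as a map-side combiner and pre-aggregate counts before the shuffle. A minimal sketch of the extra driver line, reusing the classes above:

    		// safe as a combiner because integer summation is associative and
    		// commutative; collapses repeated (word, 1) pairs before the shuffle
    		job.setCombinerClass(WordCountReducer.class);
    		job.setReducerClass(WordCountReducer.class);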
    


    Contents of wordTest1.txt in the input directory (words on each line are separated by a Tab):

    hello    world
    hello    hadoop
    hello    mapreduce


    Contents of wordTest2.txt in the input directory (words on each line are separated by a Tab):

    hello    world
    hello    hadoop
    hello    mapreduce

    HDFS output (keys arrive at the reducer in sorted order):

    hadoop  2
    hello   6
    mapreduce       2
    world   2
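
As a sanity check, the counts above can be reproduced off-cluster with a few lines of plain Java that mimic the map step (split each line on a tab, emit a 1 per word) and the reduce step (sum per word). The class name WordCountCheck and the inlined sample lines are illustrative, not part of the job:

    import java.util.Map;
    import java.util.TreeMap;

    public class WordCountCheck {
    	public static void main(String[] args) {
    		// the six tab-separated lines of wordTest1.txt and wordTest2.txt
    		String[] lines = {
    				"hello\tworld", "hello\thadoop", "hello\tmapreduce",
    				"hello\tworld", "hello\thadoop", "hello\tmapreduce"
    		};
    		// TreeMap keeps keys sorted, matching the sorted reducer output
    		Map<String, Integer> counts = new TreeMap<>();
    		for (String line : lines) {
    			for (String word : line.split("\t")) {	// "map": tokenize on tab
    				counts.merge(word, 1, Integer::sum);	// "reduce": sum per key
    			}
    		}
    		counts.forEach((w, c) -> System.out.println(w + "\t" + c));
    	}
    }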


    PS: This is a slight modification of the WordCount example bundled with Hadoop: it adds a -Q flag to pick the scheduler queue, deletes a pre-existing output directory so reruns do not fail, and splits input lines on tabs instead of tokenizing on whitespace.
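
For comparison, Hadoop's bundled WordCount tokenizes each line with a StringTokenizer, which splits on any whitespace rather than tabs only. A sketch of that variant of the mapper body, reusing the kt/vt fields from the class above:

    			// requires: import java.util.StringTokenizer;
    			StringTokenizer itr = new StringTokenizer(value.toString());
    			while (itr.hasMoreTokens()) {
    				kt.set(itr.nextToken());	// one (word, 1) pair per whitespace-separated token
    				context.write(kt, vt);
    			}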

• Original post: https://www.cnblogs.com/snake-hand/p/3178012.html