• Hadoop: counting the occurrences of a specific word in a file



    For example, suppose the file word.txt has the following content:

    what is you name?

    my name is zhang san.

    The task: count how many times "is" appears in word.txt.
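
    Given this input, "is" appears twice (once on each line), so the job should produce a single output record: the key "is" with the value 2.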

    The code consists of three classes, a mapper, a reducer, and a driver, shown below.

    PerWordMapper

    package com.hadoop.wordcount;
    
    import java.io.IOException;
    import java.util.StringTokenizer;
    
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Mapper;
    
    public class PerWordMapper extends Mapper<Object, Text, Text, IntWritable> {
    
        // Reusable output key and the constant count of 1 emitted per match.
        public Text keyText = new Text();
        public IntWritable intValue = new IntWritable(1);
    
        @Override
        protected void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            String str = value.toString();
            StringTokenizer to = new StringTokenizer(str);
            while (to.hasMoreTokens()) {
                String t = to.nextToken();
                // This is where each token is compared against the word being counted.
                if (t.equals("is")) {
                    keyText.set(t); // reuse the Text instance instead of allocating a new one
                    context.write(keyText, intValue);
                }
            }
        }
    }
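
    The target word "is" is hardcoded in the mapper above. A common refinement is to read the word from the job Configuration so the same jar can count any word. The sketch below is a minimal illustration, not part of the original post; the configuration key wordcount.target.word is an assumed name, and the driver would need a matching conf.set(...) call before submitting the job.

    package com.hadoop.wordcount;
    
    import java.io.IOException;
    import java.util.StringTokenizer;
    
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Mapper;
    
    public class ConfigurableWordMapper extends Mapper<Object, Text, Text, IntWritable> {
    
        private final Text keyText = new Text();
        private final IntWritable one = new IntWritable(1);
        private String targetWord;
    
        @Override
        protected void setup(Context context) {
            // "wordcount.target.word" is a hypothetical key; fall back to "is"
            // when the driver has not set it.
            targetWord = context.getConfiguration().get("wordcount.target.word", "is");
        }
    
        @Override
        protected void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            StringTokenizer tokens = new StringTokenizer(value.toString());
            while (tokens.hasMoreTokens()) {
                String t = tokens.nextToken();
                if (t.equals(targetWord)) {
                    keyText.set(t);
                    context.write(keyText, one);
                }
            }
        }
    }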
    



    PerWordReducer

    package com.hadoop.wordcount;
    
    import java.io.IOException;
    
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Reducer;
    
    public class PerWordReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
    
        public IntWritable intValue = new IntWritable(0);
    
        @Override
        protected void reduce(Text key, Iterable<IntWritable> value, Context context)
                throws IOException, InterruptedException {
            // Sum the 1s emitted by the mapper for this key. A for-each loop is used
            // here; the original called value.iterator() on every pass of the loop,
            // which only works because Hadoop hands back the same iterator each time.
            int sum = 0;
            for (IntWritable v : value) {
                sum += v.get();
            }
            intValue.set(sum);
            context.write(key, intValue);
        }
    }
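
    Because this reduce step is a plain sum, and addition is associative and commutative, the same class can also serve as the combiner: partial sums computed on the map side do not change the final count. The driver below takes advantage of this.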
    


    PerWordCount

    package com.hadoop.wordcount;
    
    import java.io.IOException;
    
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
    import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
    import org.apache.hadoop.util.GenericOptionsParser;
    
    public class PerWordCount {
    
        public static void main(String[] args)
                throws IOException, InterruptedException, ClassNotFoundException {
            Configuration conf = new Configuration();
            String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
            System.out.println("otherArgs.length:" + otherArgs.length);
            if (otherArgs.length != 2) {
                System.err.println("Usage: wordcount <in> <out>");
                System.exit(2);
            }
            // Job.getInstance replaces the constructor new Job(conf, name),
            // which is deprecated in current Hadoop releases.
            Job job = Job.getInstance(conf, "word count");
            job.setJarByClass(PerWordCount.class);
            job.setMapperClass(PerWordMapper.class);
            job.setCombinerClass(PerWordReducer.class);
            job.setReducerClass(PerWordReducer.class);
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(IntWritable.class);
            FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
            FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
            System.exit(job.waitForCompletion(true) ? 0 : 1);
        }
    }
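
    To run the job, package the three classes into a jar and submit it with the hadoop command. The lines below are a usage sketch; the jar name perwordcount.jar and the HDFS paths are assumptions, not taken from the original post:

    hadoop fs -put word.txt input/
    hadoop jar perwordcount.jar com.hadoop.wordcount.PerWordCount input output
    hadoop fs -cat output/part-r-00000

    With the sample word.txt above, the last command should print "is" followed by a tab and the count 2.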
    




• Original post: https://www.cnblogs.com/jiangu66/p/3187119.html