• Big Data Notes (12): Unit Testing with MRUnit
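
MRUnit is not bundled with Hadoop, so the test class below needs the MRUnit library on the test classpath. A minimal sketch of the Maven dependency, assuming the 1.1.0 release built against the Hadoop 2.x API (adjust the version and classifier to your environment):

    <dependency>
        <groupId>org.apache.mrunit</groupId>
        <artifactId>mrunit</artifactId>
        <version>1.1.0</version>
        <classifier>hadoop2</classifier>
        <scope>test</scope>
    </dependency>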


    package demo.wc;
    
    import java.util.ArrayList;
    import java.util.List;
    
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mrunit.mapreduce.MapDriver;
    import org.apache.hadoop.mrunit.mapreduce.MapReduceDriver;
    import org.apache.hadoop.mrunit.mapreduce.ReduceDriver;
    import org.junit.Test;
    
    public class MRUnitWordCount {
    
        @Test
        public void testMapper() throws Exception{
            // Set the hadoop.home.dir system property (without it an error may be thrown on Windows)
            System.setProperty("hadoop.home.dir", "D:\\temp\\hadoop-2.4.1\\hadoop-2.4.1");
            
            // Create the object under test
            WordCountMapper mapper = new WordCountMapper();
            
            // Create a MapDriver to drive the unit test
            MapDriver<LongWritable, Text, Text, IntWritable> driver = new MapDriver<>(mapper);
            
            // Specify the Map input: k1  v1
            driver.withInput(new LongWritable(1), new Text("I love Beijing"));
            
            // Specify the expected Map output: k2  v2
            driver.withOutput(new Text("I"), new IntWritable(1))
                  .withOutput(new Text("love"), new IntWritable(1))
                  .withOutput(new Text("Beijing"), new IntWritable(1));
            
            // Run the unit test and compare the expected results with the actual results
            driver.runTest();
        }
        
        @Test
        public void testReducer() throws Exception{
            // Set the hadoop.home.dir system property
            System.setProperty("hadoop.home.dir", "D:\\temp\\hadoop-2.4.1\\hadoop-2.4.1");
            
            // Create the object under test
            WordCountReducer reducer = new WordCountReducer();
            
            // Create a ReduceDriver to drive the unit test
            ReduceDriver<Text, IntWritable, Text, IntWritable> driver = new ReduceDriver<>(reducer);
            
            // Build v3: the List of values for the key
            List<IntWritable> value3 = new ArrayList<>();
            value3.add(new IntWritable(1));
            value3.add(new IntWritable(1));
            value3.add(new IntWritable(1));
            
            
            // Specify the Reducer input
            driver.withInput(new Text("Beijing"), value3);
            
            
            // Specify the expected Reducer output
            driver.withOutput(new Text("Beijing"), new IntWritable(3));
            
            
            // Run the test
            driver.runTest();
        }
        
        @Test
        public void testJob() throws Exception{
            // Set the hadoop.home.dir system property
            System.setProperty("hadoop.home.dir", "D:\\temp\\hadoop-2.4.1\\hadoop-2.4.1");
            
            // Create the objects under test
            WordCountMapper mapper = new WordCountMapper();        
            WordCountReducer reducer = new WordCountReducer();        
            
            // Create a MapReduceDriver covering the whole job
            // MapReduceDriver<K1, V1, K2, V2, K4, V4>
            MapReduceDriver<LongWritable, Text, Text, IntWritable, Text, IntWritable>
                    driver = new MapReduceDriver<>(mapper,reducer);
            
            // Specify the Map input records
            driver.withInput(new LongWritable(1), new Text("I love Beijing"))
                  .withInput(new LongWritable(4), new Text("I love China"))
                  .withInput(new LongWritable(7), new Text("Beijing is the capital of China"));
            
            // Expected Reducer output in arrival order (left commented out for comparison)
    //        driver.withOutput(new Text("I"), new IntWritable(2))
    //              .withOutput(new Text("love"), new IntWritable(2))
    //              .withOutput(new Text("Beijing"), new IntWritable(2))
    //              .withOutput(new Text("China"), new IntWritable(2))
    //              .withOutput(new Text("is"), new IntWritable(1))
    //              .withOutput(new Text("the"), new IntWritable(1))
    //              .withOutput(new Text("capital"), new IntWritable(1))
    //              .withOutput(new Text("of"), new IntWritable(1));
            
            // Expected Reducer output (keys in the default sort order)
            driver.withOutput(new Text("Beijing"), new IntWritable(2))
                  .withOutput(new Text("China"), new IntWritable(2))
                  .withOutput(new Text("I"), new IntWritable(2))
                  .withOutput(new Text("capital"), new IntWritable(1))
                  .withOutput(new Text("is"), new IntWritable(1))
                  .withOutput(new Text("love"), new IntWritable(2))
                  .withOutput(new Text("of"), new IntWritable(1))
                  .withOutput(new Text("the"), new IntWritable(1));
            
            driver.runTest();
        }
    }
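
The tests above drive a WordCountMapper and WordCountReducer that are not listed in this post. A minimal sketch of the two classes, assuming the standard word-count logic (split each line on spaces, emit (word, 1), then sum the counts per word):

    package demo.wc;
    
    import java.io.IOException;
    
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Mapper;
    import org.apache.hadoop.mapreduce.Reducer;
    
    // WordCountMapper.java: emit (word, 1) for every space-separated token in the line
    public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
        @Override
        protected void map(LongWritable k1, Text v1, Context context)
                throws IOException, InterruptedException {
            String[] words = v1.toString().split(" ");
            for (String word : words) {
                context.write(new Text(word), new IntWritable(1));
            }
        }
    }
    
    // WordCountReducer.java (separate file): sum the counts collected for each word
    public class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
        @Override
        protected void reduce(Text k3, Iterable<IntWritable> v3, Context context)
                throws IOException, InterruptedException {
            int total = 0;
            for (IntWritable v : v3) {
                total += v.get();
            }
            context.write(k3, new IntWritable(total));
        }
    }

With these two classes in place, the expected outputs in testMapper, testReducer, and testJob above match what the job would actually produce.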
  • Original post: https://www.cnblogs.com/lingluo2017/p/8540275.html