• Spark wordcount program


    Spark wordcount program: IllegalAccessError

    This is an access error: it is thrown when a class references a member it is not allowed to, for example calling a private or protected method. It can only happen at run time if a class definition has changed incompatibly after compilation, which usually points to mismatched dependency versions on the classpath.
    Of course, the wordcount logic itself is trivial and only calls methods from its dependencies, so the problem had to be in the dependencies. A blog post describing the same error ("Spark 2.2 fails reading Hadoop 3.0 data") finally explained it; I tried its fix and it worked, although I did not need nearly as many dependency changes as that author.

    Exception in thread "main" java.lang.IllegalAccessError: class org.apache.hadoop.hdfs.web.HftpFileSystem cannot access its superinterface org.apache.hadoop.hdfs.web.TokenAspect$TokenManagementDelegator
            at java.lang.ClassLoader.defineClass1(Native Method)
            at java.lang.ClassLoader.defineClass(ClassLoader.java:763)
            at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
            at java.net.URLClassLoader.defineClass(URLClassLoader.java:467)
            at java.net.URLClassLoader.access$100(URLClassLoader.java:73)
            at java.net.URLClassLoader$1.run(URLClassLoader.java:368)
            at java.net.URLClassLoader$1.run(URLClassLoader.java:362)
            at java.security.AccessController.doPrivileged(Native Method)
            at java.net.URLClassLoader.findClass(URLClassLoader.java:361)
            at java.lang.ClassLoader.loadClass(ClassLoader.java:424)
            at java.lang.ClassLoader.loadClass(ClassLoader.java:357)
            at java.lang.Class.forName0(Native Method)
            at java.lang.Class.forName(Class.java:348)
            at java.util.ServiceLoader$LazyIterator.nextService(ServiceLoader.java:370)
            at java.util.ServiceLoader$LazyIterator.next(ServiceLoader.java:404)
            at java.util.ServiceLoader$1.next(ServiceLoader.java:480)
            at org.apache.hadoop.fs.FileSystem.loadFileSystems(FileSystem.java:3202)
            at org.apache.hadoop.fs.FileSystem.getFileSystemClass(FileSystem.java:3247)
            at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:3286)
            at org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:123)
            at org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:3337)
            at org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:3305)
            at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:476)
            at org.apache.spark.util.Utils$.getHadoopFileSystem(Utils.scala:1857)
            at org.apache.spark.scheduler.EventLoggingListener.<init>(EventLoggingListener.scala:68)
            at org.apache.spark.SparkContext.<init>(SparkContext.scala:532)
    

    Problem analysis

    I installed Spark through CDH: Spark is 2.4.0 and Hadoop is 3.0.0. When I checked the external libraries in IDEA, however, the Hadoop jars that Spark pulls in were version 2.6.x.
    So the Hadoop version that the Spark artifact depends on does not match the Hadoop version in the production environment.
    The fix is to exclude Spark's transitive Hadoop dependency and import the matching hadoop-client manually.
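
    To see which Hadoop version spark-core actually drags in, you can inspect the Maven dependency tree before and after the change (a standard maven-dependency-plugin invocation; the includes filter just narrows the output):

        mvn dependency:tree -Dincludes=org.apache.hadoop:*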

    Solution

        <properties>
            <hbase.version>2.0.0</hbase.version>
            <spark.version>2.4.0</spark.version>
            <scala.version>2.11.12</scala.version>
            <hadoop.version>3.0.0</hadoop.version>
        </properties>
    
    
        <dependencies>
    <!--        <dependency>
                <groupId>org.apache.spark</groupId>
                <artifactId>spark-core_2.12</artifactId>
                <version>${spark.version}</version>
                <scope>provided</scope>
            </dependency>-->
    
    <!--        Fix for the IllegalAccessError: pin hadoop-client to the cluster's Hadoop version -->
            <dependency>
                <groupId>org.apache.hadoop</groupId>
                <artifactId>hadoop-client</artifactId>
                <version>${hadoop.version}</version>
            </dependency>
    
    
            <dependency>
                <groupId>org.apache.spark</groupId>
                <artifactId>spark-core_2.11</artifactId>
                <version>${spark.version}</version>
                <exclusions>
                    <exclusion>
                        <artifactId>hadoop-client</artifactId>
                        <groupId>org.apache.hadoop</groupId>
                    </exclusion>
                </exclusions>
            </dependency>
        </dependencies>
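
    With the exclusion in place, the Hadoop 3.0.0 client jars that match the cluster end up on the classpath instead of the 2.6.x jars Spark would otherwise bring along. As a run-time sanity check you can print the Hadoop version that actually got loaded (a minimal sketch; VersionInfo lives in hadoop-common, which hadoop-client pulls in):

        import org.apache.hadoop.util.VersionInfo;

        public class HadoopVersionCheck {
            public static void main(String[] args) {
                // Expect 3.0.0 after the exclusion above; 2.6.x means Spark's
                // transitive Hadoop jars are still winning on the classpath.
                System.out.println("Hadoop on classpath: " + VersionInfo.getVersion());
            }
        }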
    

    Appendix: the wordcount program

    package com.learn.hadoop.spark.wordcount;
    
    import org.apache.spark.SparkConf;
    import org.apache.spark.api.java.JavaPairRDD;
    import org.apache.spark.api.java.JavaRDD;
    import org.apache.spark.api.java.JavaSparkContext;
    import org.apache.spark.api.java.function.FlatMapFunction;
    import org.apache.spark.api.java.function.Function2;
    import org.apache.spark.api.java.function.PairFunction;
    import org.apache.spark.api.java.function.VoidFunction;
    import scala.Tuple2;
    
    import java.util.Arrays;
    import java.util.Iterator;
    
    public class WordCount {
        public static void main(String[] args) {
            if (args == null || args.length < 1)
            {
                System.err.println("usage: WordCount <output path>");
                System.exit(1);
            }
            String outfile = args[0];
            SparkConf sparkConf = new SparkConf().setMaster("local").setAppName("wordCount");
            JavaSparkContext sc = new JavaSparkContext(sparkConf);
            // The input file is hardcoded; README.MD must exist in the working directory.
            String inputFile = "README.MD";
            JavaRDD<String> input = sc.textFile(inputFile);
            // Split each line into words.
            JavaRDD<String> words = input.flatMap(new FlatMapFunction<String, String>() {
                @Override
                public Iterator<String> call(String s) throws Exception {
                    return Arrays.asList(s.split(" ")).iterator();
                }
            });
    
            // pairs: map each word to a (word, 1) tuple
            JavaPairRDD<String, Integer> pairs = words.mapToPair(new PairFunction<String, String, Integer>() {
                @Override
                public Tuple2<String, Integer> call(String s) throws Exception {
                    return new Tuple2<String, Integer>(s, 1);
                }
            });
            // reduce: sum the counts for each word
            JavaPairRDD<String, Integer> counts = pairs.reduceByKey(new Function2<Integer, Integer, Integer>() {
                @Override
                public Integer call(Integer integer, Integer integer2) throws Exception {
                    return integer + integer2;
                }
            });
            // output: print each (word, count) pair on the driver, then save to the output path
            counts.foreach(new VoidFunction<Tuple2<String, Integer>>() {
                @Override
                public void call(Tuple2<String, Integer> stringIntegerTuple2) throws Exception {
                    System.out.println(stringIntegerTuple2);
                }
            });
            counts.saveAsTextFile(outfile);
            sc.stop();
    
        }
    }
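
    To build and run the job locally, something like this should work (the jar name and output path are assumptions from a default Maven build; setMaster("local") in the code keeps execution on one machine):

        mvn clean package
        spark-submit --class com.learn.hadoop.spark.wordcount.WordCount \
            --master local target/wordcount-1.0-SNAPSHOT.jar output/

    README.MD is read from the working directory, and the word counts are printed and then saved under output/.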
    
    
  • Original post: https://www.cnblogs.com/JuncaiF/p/12356011.html