I am trying to read data from AWS S3 into a Dataset/RDD in Java. I am running the Spark code in Java from IntelliJ, so I added the Hadoop dependencies to pom.xml as well.

Below are my code and my pom.xml dependencies.

    import org.apache.spark.api.java.JavaRDD;
    import org.apache.spark.api.java.JavaSparkContext;
    import org.apache.spark.sql.SparkSession;

    public class SparkJava {

        public static void main(String[] args) {

            // AWS_KEY / AWS_SECRET_KEY are placeholders for real credentials.
            SparkSession spark = SparkSession
                    .builder()
                    .master("local")
                    .config("spark.hadoop.fs.s3a.impl", "org.apache.hadoop.fs.s3a.S3AFileSystem")
                    .config("spark.hadoop.mapreduce.fileoutputcommitter.algorithm.version", "2")
                    // s3a reads fs.s3a.* keys; the fs.s3n.* keys belong to the old s3n connector
                    .config("spark.hadoop.fs.s3a.access.key", AWS_KEY)
                    .config("spark.hadoop.fs.s3a.secret.key", AWS_SECRET_KEY)
                    .getOrCreate();

            JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());
            String input_path = "s3a://bucket/2018/07/28";
            JavaRDD<String> s3aRdd = sc.textFile(input_path);
            long count = s3aRdd.count();  // THIS IS CAUSING THE EXCEPTION
            System.out.println(count);
            System.out.println("Finished");
        }
    }
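For reference, since the goal is a Dataset/RDD, reading the same path into a Dataset<String> is a one-liner on the DataFrameReader. A minimal sketch, with the credentials left out and the same placeholder bucket path:

    import org.apache.spark.sql.Dataset;
    import org.apache.spark.sql.SparkSession;

    public class SparkJavaDataset {
        public static void main(String[] args) {
            SparkSession spark = SparkSession
                    .builder()
                    .master("local")
                    .config("spark.hadoop.fs.s3a.impl", "org.apache.hadoop.fs.s3a.S3AFileSystem")
                    .getOrCreate();

            // textFile on the DataFrameReader yields a Dataset<String>,
            // one element per line, instead of a JavaRDD<String>.
            Dataset<String> lines = spark.read().textFile("s3a://bucket/2018/07/28");
            System.out.println(lines.count());

            spark.stop();
        }
    }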

Here are the dependencies from pom.xml:

    <dependencies>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-core_2.11</artifactId>
            <version>2.3.1</version>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-sql_2.11</artifactId>
            <version>2.3.1</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-aws</artifactId>
            <version>3.1.1</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>3.1.1</version>
        </dependency>
    </dependencies>
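Note that spark-core_2.11 2.3.1 also pulls in its own Hadoop client jars transitively (typically Hadoop 2.6.x), so the Hadoop version that wins on the runtime classpath may not be the 3.1.1 declared above. A minimal sketch to confirm what is actually resolved, using Hadoop's VersionInfo utility:

    import org.apache.hadoop.util.VersionInfo;

    public class HadoopVersionCheck {
        public static void main(String[] args) {
            // Prints the Hadoop version actually resolved onto the classpath,
            // which can differ from the versions declared in pom.xml when
            // transitive dependencies win the Maven version resolution.
            System.out.println(VersionInfo.getVersion());
        }
    }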

This is not the version issue described in this question: NoClassDefFoundError: org/apache/hadoop/fs/StreamCapabilities while reading s3 Data with spark

1 Answer

Solved this by adding the following dependency to pom.xml, in addition to those above:

    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-common</artifactId>
        <version>3.1.1</version>
    </dependency>
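This makes sense: the S3AFileSystem in hadoop-aws 3.1.1 references org.apache.hadoop.fs.StreamCapabilities, a class that only exists in hadoop-common 2.9+/3.x. Without an explicit hadoop-common 3.1.1, an older transitive hadoop-common can win the Maven resolution and the class goes missing at runtime. A minimal sketch to verify the fix, checking that the class loads and showing which jar it comes from:

    import java.net.URL;

    public class StreamCapabilitiesCheck {
        public static void main(String[] args) throws ClassNotFoundException {
            // Throws ClassNotFoundException if the hadoop-common on the
            // classpath is too old to contain StreamCapabilities.
            Class<?> c = Class.forName("org.apache.hadoop.fs.StreamCapabilities");
            // Show which jar the class was actually loaded from.
            URL jar = c.getProtectionDomain().getCodeSource().getLocation();
            System.out.println(jar);
        }
    }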