-1

I am once again asking for your technical support.

I receive multiple "cannot find symbol" errors in my code. How can I fix them? I also have problems converting between variable types.

For some of those variables, I have been trying to convert them using, for example: new Text(some_string_variable). Is the 'new' part necessary? Can I just call Text(some_string_variable)?

import java.io.*;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.util.*;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;




public class mapreduce{

  public static class XMapper extends Mapper<Object, Text, IntWritable, Text>{
 //@Override

    //private IntWritable keys = new IntWritable();
    private Text nums = new Text();

    private final static IntWritable one = new IntWritable(1);

    public void map(Object key, Text value, Context context) throws IOException, InterruptedException {

    String[] Columns = value.toString().split(",");
    String col0 = Columns[0];
    String col1 = Columns[1];
    String col2 = Columns[2];
    String col3 = Columns[3];

    int colInt0 = Integer.parseInt(col0);
    int colInt2 = Integer.parseInt(col2);
    double colInt3 = Double.parseDouble(col3);

    if(colInt0 != 0 && colInt2 !=0 && colInt3 !=0){

        nums = String.valueOf(one)+"\t"+String.valueOf(colInt3);

        context.write(new IntWritable(colInt0), new Text(nums));


        }


    }

  }

  public static class XReducer extends Reducer<IntWritable,Text,IntWritable,Text>{
    public Text tupleResult = new Text();

    private IntWritable result = new IntWritable();

    public void reduce(IntWritable key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
        //int colKey = 0;

        //int colVal = 0;
        int countAll = 0;
        float sumAll = 0;

        for(Text val: values){

            StringTokenizer itr = new StringTokenizer(val.toString());

            int count = Integer.parseInt(itr.nextToken());
            double sum = Double.parseDouble(itr.nextToken());



            }
        TupleResult.set(Integer.toString(count)+','+Double.toString(sum));
        context.write(new IntWritable(key),new Text(TupleResult));

        }


    }




  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Job job = Job.getInstance(conf, "mapreduce");

    /* TODO: Needs to be implemented */
    job.setJarByClass(mapreduce.class);
    job.setMapperClass(XMapper.class);
    job.setReducerClass(XReducer.class);
    job.setOutputKeyClass(IntWritable.class);
    job.setOutputValueClass(Text.class);

    //

    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}

[ERROR]  mapreduce.java:[38,48] incompatible types: java.lang.String cannot be converted to org.apache.hadoop.io.Text
[ERROR] mapreduce.java:[64,25] cannot find symbol
[ERROR]   symbol:   class StringTokenizer
[ERROR]   location: class mapreduce.XReducer
[ERROR] mapreduce.java:[64,51] cannot find symbol
[ERROR]   symbol:   class StringTokenizer
[ERROR]   location: class mapreduce.XReducer
[ERROR] mapreduce.java:[72,50] cannot find symbol
[ERROR]   symbol:   variable count
[ERROR]   location: class mapreduce.XReducer
[ERROR] mapreduce.java:[72,77] cannot find symbol
[ERROR]   symbol:   variable sum
[ERROR]   location: class mapreduce.XReducer
[ERROR] mapreduce.java:[72,17] cannot find symbol
[ERROR]   symbol:   variable TupleResult
[ERROR]   location: class mapreduce.XReducer
[ERROR] mapreduce.java:[73,47] incompatible types: org.apache.hadoop.io.IntWritable cannot be converted to int
[ERROR] mapreduce.java:[73,61] cannot find symbol
[ERROR]   symbol:   variable TupleResult
[ERROR]   location: class mapreduce.XReducer
Olaf Kock
  • 46,930
  • 8
  • 59
  • 90
Poskaz
  • 65
  • 6

2 Answers2

0

The "symbol not found" errors may be due to the fact that not all the classes you use are imported — for example StringTokenizer (https://docs.oracle.com/javase/7/docs/api/java/util/StringTokenizer.html)

The 'new' keyword is mandatory because you are creating a Hadoop Text object.

Best regards

TOTO
  • 307
  • 1
  • 6
0

There are a lot of problems in your code.

  1. You miss the import for the class StringTokenizer
  2. You use the variables sum and count outside of their scope.
  3. you should take care of java naming conventions. Class names should start with upper case character.

Also, you should format your code properly.

import java.io.*;
import java.util.StringTokenizer;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.util.*;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/* TODO: Update variable below with your gtid */
public class Mapreduce{

  public static class XMapper extends Mapper<Object, Text, IntWritable, Text>{
 //@Override

    //private IntWritable keys = new IntWritable();
    private Text nums = new Text();

    private final static IntWritable one = new IntWritable(1);

    public void map(Object key, Text value, Context context) throws IOException, InterruptedException {

    String[] Columns = value.toString().split(",");
    String col0 = Columns[0];
    String col1 = Columns[1];
    String col2 = Columns[2];
    String col3 = Columns[3];

    int colInt0 = Integer.parseInt(col0);
    int colInt2 = Integer.parseInt(col2);
    double colInt3 = Double.parseDouble(col3);

    if(colInt0 != 0 && colInt2 !=0 && colInt3 !=0){

        nums = String.valueOf(one)+"\t"+String.valueOf(colInt3);

        context.write(new IntWritable(colInt0), new Text(nums));


        }


    }

  }

  public static class XReducer extends Reducer<IntWritable,Text,IntWritable,Text>{
    public Text tupleResult = new Text();

    private IntWritable result = new IntWritable();

    public void reduce(IntWritable key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
        //int colKey = 0;

        //int colVal = 0;
        int countAll = 0;
        float sumAll = 0;
        for(Text val: values){

            StringTokenizer itr = new StringTokenizer(val.toString());

            int count = Integer.parseInt(itr.nextToken());
            double sum = Double.parseDouble(itr.nextToken());


            TupleResult.set(Integer.toString(count)+','+Double.toString(sum));
           context.write(new IntWritable(key),new Text(TupleResult));

            }

        }


    }




  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Job job = Job.getInstance(conf, "mapreduce");

    /* TODO: Needs to be implemented */
    job.setJarByClass(mapreduce.class);
    job.setMapperClass(XMapper.class);
    job.setReducerClass(XReducer.class);
    job.setOutputKeyClass(IntWritable.class);
    job.setOutputValueClass(Text.class);

    //

    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}
Jens
  • 67,715
  • 15
  • 98
  • 113