
I have the following algorithm that sorts data in alphabetical order:

  public static class LineMapper
       extends Mapper<Object, Text, Text, IntWritable> {

    public void setup(Context context) throws IOException,
        InterruptedException {
      conf = context.getConfiguration();
      caseSensitive = conf.getBoolean("amasort.case.sensitive", true);
    }

    @Override
    public void map(Object key, Text value, Context context
                    ) throws IOException, InterruptedException {
      String line = (caseSensitive) ? value.toString() : value.toString().toLowerCase();
      word.set(line + "_" + key.toString());
      context.write(word, one);
      System.out.println("key:" + key.toString() + ";value:" + value.toString());
    }
  }

  public static class ForwardReducer
       extends Reducer<Text,NullWritable,Text,NullWritable> {
    private NullWritable result = NullWritable.get();

    public void reduce(Text key, Iterable<NullWritable> values,
                       Context context
                       ) throws IOException, InterruptedException {

      String originalWord = key.toString();
      originalWord = originalWord.substring(0, originalWord.lastIndexOf("_"));
      key.set(originalWord);
      context.write(key, result);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    GenericOptionsParser optionParser = new GenericOptionsParser(conf, args);
    String[] remainingArgs = optionParser.getRemainingArgs();
    Job job = Job.getInstance(conf, "word sort");
    job.setJarByClass(AmaSort.class);
    job.setMapperClass(LineMapper.class);
//    job.setCombinerClass(ForwardReducer.class);
    job.setReducerClass(ForwardReducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(NullWritable.class);

    FileInputFormat.addInputPath(job, new Path(remainingArgs[0]));
    FileOutputFormat.setOutputPath(job, new Path(remainingArgs[1]));

    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}

I tried this algorithm to sort my dataset, whose lines look like (@xxxxxxx , 0,tcp,xx,1,1,1,2,4,5,....), but in the output all lines starting with @ are deleted and the structure of the data lines (0,tcp,x1x1,1,114,....) is modified. I just want to sort my dataset on this specific character (@): all lines starting with @ at the beginning of the file, and the rest keeping the same structure. Can anyone help me modify this algorithm?


1 Answer

You can use the modified code below to perform the sorting. The mapper emits each whole input line as the key with a `NullWritable` value, the shuffle phase sorts the keys lexicographically, and the reducer writes each distinct key back out:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class AmaSort
{
    static Configuration conf = null;
    private static boolean caseSensitive;
    private static Text word = new Text();

    public static class LineMapper extends Mapper<Object, Text, Text, NullWritable>{

        public void setup(Context context) throws IOException, InterruptedException
        {
            conf = context.getConfiguration();
            caseSensitive = conf.getBoolean("amasort.case.sensitive", true);

        }

        @Override
        public void map(Object key, Text value, Context context) throws IOException, InterruptedException
        {
            String line = (caseSensitive) ? value.toString() : value.toString().toLowerCase();
            word.set(line);
            context.write(word, NullWritable.get());

        }
    }

    public static class ForwardReducer extends Reducer<Text, NullWritable, Text, NullWritable>
    {
        private NullWritable result = NullWritable.get();

        public void reduce(Text key, Iterable<NullWritable> values, Context context) throws IOException, InterruptedException
        {
            context.write(key, result);
        }
    }

    public static void main(String[] args) throws Exception
    {
        Configuration conf = new Configuration();
        GenericOptionsParser optionParser = new GenericOptionsParser(conf, args);
        String[] remainingArgs = optionParser.getRemainingArgs();
        Job job = Job.getInstance(conf, "word sort");
        job.setJarByClass(AmaSort.class);
        job.setMapperClass(LineMapper.class);
        // job.setCombinerClass(ForwardReducer.class);
        job.setReducerClass(ForwardReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);

        FileInputFormat.addInputPath(job, new Path(remainingArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(remainingArgs[1]));

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }

}
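
By default the map output keys are compared lexicographically, and in ASCII the digits ('0'–'9', 0x30–0x39) sort before '@' (0x40), which is why the data lines end up ahead of the @ lines. To force lines starting with @ to the top of the output, you can plug a custom sort comparator into the job. The following is a minimal sketch (an editor's addition, not part of the original answer); the class name `AtFirstComparator` is illustrative:

import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;

// Hypothetical comparator: sorts lines starting with '@' before all
// other lines, and falls back to plain lexicographic order otherwise.
public class AtFirstComparator extends WritableComparator {

    public AtFirstComparator() {
        super(Text.class, true); // instantiate Text keys for comparison
    }

    @Override
    @SuppressWarnings("rawtypes")
    public int compare(WritableComparable a, WritableComparable b) {
        String s1 = a.toString();
        String s2 = b.toString();
        boolean at1 = s1.startsWith("@");
        boolean at2 = s2.startsWith("@");
        if (at1 != at2) {
            return at1 ? -1 : 1; // '@' lines first
        }
        return s1.compareTo(s2); // same group: lexicographic order
    }
}

and register it in main() before submitting the job:

    job.setSortComparatorClass(AtFirstComparator.class);

Among the @ lines themselves, lexicographic order already places @attribute before @data.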
  • Thank you @prashant, it worked, but a lot of data was deleted (my original dataset is 36 MB; the sorted output is only 3.6 MB), and in the output I get the data lines (0,tcp,xxxxx.0,1,111) first, then the @attribute lines, and the @data line last. I want this order: the @attribute lines, then the single @data line, and finally the rest of the data. Could you please help me resolve this? Or if you want, send me your email so we can contact each other. – BigBosss Dec 16 '15 at 14:51
  • As we are performing the sorting using keys, by default they are sorted with the key's `RawComparator`, but if you want to sort them using your own custom order, you can implement your own `SortComparator`; please have a look at http://stackoverflow.com/questions/16184745/what-is-difference-between-sort-comparator-and-group-comparator-in-hadoop. By the way, you can contact me at prashant.n.khunt@gmail.com – prashant khunt Dec 17 '15 at 05:06
  • Can you please also give some examples of lines that were deleted from the original data? The size might be reduced because duplicates are removed from the data (a duplicate-preserving reducer is sketched after these comments). – prashant khunt Dec 17 '15 at 05:08
  • Thank you brother, I sent you a mail just now; I hope I'll receive your reply soon. – BigBosss Dec 17 '15 at 13:21
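
On the shrinking output mentioned above: the answer's reducer writes each distinct key exactly once, so duplicate lines are collapsed, which would explain the 36 MB to 3.6 MB shrink if the dataset contains many repeated lines. A minimal duplicate-preserving variant of reduce() (an editor's sketch, not from the thread):

    public void reduce(Text key, Iterable<NullWritable> values, Context context)
            throws IOException, InterruptedException {
        // Write the line once per occurrence instead of once per distinct key.
        for (NullWritable ignored : values) {
            context.write(key, NullWritable.get());
        }
    }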