
I followed the instructions at Call c function from Java to create a JNI wrapper for a C++ function (a HelloWorld example). It works fine as part of a standalone Java program. My programs are listed below:

////////////
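// HelloWorld.java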
class HelloWorld {
    private native void myprint(char c);

    public void from_java() {
        myprint('I');
    }

    public static void main(String[] args) {
        new HelloWorld().from_java();
    }

    static {
        System.loadLibrary("HelloWorld");
    }
}
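
// HelloWorld.h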

/* DO NOT EDIT THIS FILE - it is machine generated */
#include <jni.h>
/* Header for class HelloWorld */

#ifndef _Included_HelloWorld
#define _Included_HelloWorld
#ifdef __cplusplus
extern "C" {
#endif

JNIEXPORT void JNICALL Java_HelloWorld_myprint(JNIEnv *, jobject, jchar);

#ifdef __cplusplus
}
#endif
#endif
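
// HelloWorld.cpp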

#include <stdio.h>
#include "HelloWorld.h"

JNIEXPORT void JNICALL Java_HelloWorld_myprint(JNIEnv *env, jobject obj, jchar c) {
    /* jchar is a 16-bit UTF-16 code unit, so cast it for printf's %c */
    printf("Hello World! %c\n", (char)c);
}

////////////

myprint(..) can be called fine from Java.
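
For reference, the build-and-run steps for the standalone version are roughly the following (Linux paths assumed; adjust the JNI include directories for your JDK):

//// javac HelloWorld.java
//// javah -jni HelloWorld
//// g++ -shared -fPIC -I"$JAVA_HOME/include" -I"$JAVA_HOME/include/linux" HelloWorld.cpp -o libHelloWorld.so
//// java -Djava.library.path=. HelloWorld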

However, when I make it part of the Hadoop WordCount example, I get an error.

My Hadoop program is:

////////////
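// WordCount.java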
import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;


public class WordCount {

    public static class TokenizerMapper
            extends Mapper<Object, Text, Text, IntWritable> {

        class HelloWorld {

            private native void myprint(char c);

            public void mymain() {
                myprint('I');
            }
        }

        static {
            System.loadLibrary("HelloWorld");
        }

        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        public void map(Object key, Text value, Context context
                        ) throws IOException, InterruptedException {
            StringTokenizer itr = new StringTokenizer(value.toString());

            HelloWorld obj = new HelloWorld();
            obj.mymain();

            while (itr.hasMoreTokens()) {
                word.set(itr.nextToken());
                context.write(word, one);
            }
        }
    }

    public static class IntSumReducer
            extends Reducer<Text, IntWritable, Text, IntWritable> {
        private IntWritable result = new IntWritable();

        public void reduce(Text key, Iterable<IntWritable> values,
                           Context context
                           ) throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            result.set(sum);
            context.write(key, result);
        }
    }

    public static void main(String[] args) throws Exception {

        Configuration conf = new Configuration();

        Job job = Job.getInstance(conf, "word count");
        job.setJarByClass(WordCount.class);
        job.setMapperClass(TokenizerMapper.class);
        job.setCombinerClass(IntSumReducer.class);
        job.setReducerClass(IntSumReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
////////////

When I run this, I get the following error:

17/09/02 09:36:57 INFO mapreduce.Job: Job job_local737469568_0001 failed with state FAILED due to: NA
17/09/02 09:36:57 WARN mapred.LocalJobRunner: job_local737469568_0001
java.lang.Exception: java.lang.UnsatisfiedLinkError: WordCount$TokenizerMapper$HelloWorld.myprint(C)V
    at org.apache.hadoop.mapred.LocalJobRunner$Job.runTasks(LocalJobRunner.java:489)
    at org.apache.hadoop.mapred.LocalJobRunner$Job.run(LocalJobRunner.java:549)
Caused by: java.lang.UnsatisfiedLinkError: WordCount$TokenizerMapper$HelloWorld.myprint(C)V
    at WordCount$TokenizerMapper$HelloWorld.myprint(Native Method)
    at WordCount$TokenizerMapper$HelloWorld.mymain(WordCount.java:26)
    at WordCount$TokenizerMapper.map(WordCount.java:42)
    at WordCount$TokenizerMapper.map(WordCount.java:18)
    at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:146)
    at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:787)
    at org.apache.hadoop.mapred.MapTask.run(MapTask.java:341)
    at org.apache.hadoop.mapred.LocalJobRunner$Job$MapTaskRunnable.run(LocalJobRunner.java:270)
  • I was able to solve the problem. It had to do with the complete path to the function being called. I had created HelloWorld.h/.cpp separately from the actual Hadoop program, so the full path did not match. Once I fixed that, the problem was solved. – Ab Ran Sep 02 '17 at 12:23
  • If that is the case, please put that as the answer for this question, and mark it as resolved. – Remis Haroon - رامز Sep 03 '17 at 06:39
  • @AbRan you need to write your solution as an answer and accept it. – Sergei Bubenshchikov Sep 04 '17 at 06:01
  • Sorry about the delay in posting the reply. I have posted the source code and the correct steps for generating the signature of the C++ function that will be called from Java. – Ab Ran Sep 17 '17 at 06:26

1 Answer

//// Need to do the following:
//// javah -jni WordCount
//// This will generate WordCount_TokenizerMapper_HelloWorld.h as follows:

/* DO NOT EDIT THIS FILE - it is machine generated */
#include <jni.h>
/* Header for class WordCount_TokenizerMapper_HelloWorld */

#ifndef _Included_WordCount_TokenizerMapper_HelloWorld
#define _Included_WordCount_TokenizerMapper_HelloWorld
#ifdef __cplusplus
extern "C" {
#endif
/*
 * Class:     WordCount_TokenizerMapper_HelloWorld
 * Method:    myprint
 * Signature: (C)V
 */

//// Note the function name here: the '$' that separates nested class
//// names is escaped as _00024 (Unicode 0x0024) in the JNI symbol name.
//// This is the symbol JNI looks up in the .so built from the C++ code.
//// BOTTOMLINE: Run javah on the actual Java source from which you
////             want to call the C++ function. DON'T DO IT SEPARATELY.
JNIEXPORT void JNICALL Java_WordCount_00024TokenizerMapper_00024HelloWorld_myprint(JNIEnv *, jobject, jchar);

#ifdef __cplusplus
}
#endif
#endif
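
For completeness, the matching implementation is then just the old HelloWorld.cpp body behind the new, fully-mangled name (a minimal sketch; the file name WordCount_TokenizerMapper_HelloWorld.cpp is my own choice):

#include <stdio.h>
#include "WordCount_TokenizerMapper_HelloWorld.h"

JNIEXPORT void JNICALL Java_WordCount_00024TokenizerMapper_00024HelloWorld_myprint(JNIEnv *env, jobject obj, jchar c) {
    /* jchar is a 16-bit UTF-16 code unit, so cast it for printf's %c */
    printf("Hello World! %c\n", (char)c);
}

//// Rebuild the shared library from this file, e.g.:
//// g++ -shared -fPIC -I"$JAVA_HOME/include" -I"$JAVA_HOME/include/linux" WordCount_TokenizerMapper_HelloWorld.cpp -o libHelloWorld.so

Since the failing job above ran under the LocalJobRunner, making libHelloWorld.so visible to the client JVM (for example via HADOOP_OPTS="-Djava.library.path=/path/to/lib") should be enough; a fully distributed job would also need the .so shipped to the task nodes.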