
I am converting a Dataset<T> to a JavaRDD<T>.

I am using the toJavaRDD() method to do this.

Once the conversion is done, I call the collect() method on the RDD.
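
For context, here is a minimal sketch of that flow. The JDBC options are placeholders and the bean class name is taken from the generated code shown further down; adjust both to your setup.

import java.util.List;

import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SparkSession;

public class ConvertExample {
    public static void main(String[] args) {
        SparkSession spark = SparkSession.builder()
                .appName("dataset-to-rdd")
                .master("local[*]")
                .getOrCreate();

        // Load from a database table (connection details are placeholders)
        Dataset<com.sample.RuleParamsBean> ds = spark.read()
                .format("jdbc")
                .option("url", "jdbc:...")           // placeholder
                .option("dbtable", "RULE_PARAMS")    // hypothetical table name
                .load()
                .as(Encoders.bean(com.sample.RuleParamsBean.class));

        JavaRDD<com.sample.RuleParamsBean> rdd = ds.toJavaRDD();

        // collect() is the action that triggers code generation,
        // so the CompileException below surfaces at this call
        List<com.sample.RuleParamsBean> rows = rdd.collect();
        System.out.println(rows.size());
    }
}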

At that point the following exception is thrown:

CodeGenerator: failed to compile: org.codehaus.commons.compiler.CompileException: File 'generated.java', Line 2114, Column 71: No applicable constructor/method found for actual parameters "long"; candidates are: "public static java.sql.Date org.apache.spark.sql.catalyst.util.DateTimeUtils.toJavaDate(int)"
org.codehaus.commons.compiler.CompileException: File 'generated.java', Line 2114, Column 71: No applicable constructor/method found for actual parameters "long"; candidates are: "public static java.sql.Date org.apache.spark.sql.catalyst.util.DateTimeUtils.toJavaDate(int)"
    at org.codehaus.janino.UnitCompiler.compileError(UnitCompiler.java:11821)

I can see the first 1000 lines of the generated.java file in the Eclipse console; that file is produced by org.apache.spark.sql.catalyst.expressions.codegen.CodeGenerator.

However, the error is on line 2114, as mentioned in the exception.

The following line is printed in the console at the end of the file: [truncated to 1000 lines (total lines is 3381)]

My question is: how can I view all the lines of the generated.java file, in order to resolve my error?
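
The 1000-line cap behind that "[truncated to 1000 lines ...]" message appears to come from Spark's internal spark.sql.codegen.logging.maxLines setting, which defaults to 1000; treat the exact key as an assumption to verify against your Spark version. A sketch of raising it:

import org.apache.spark.sql.SparkSession;

public class FullCodegenLog {
    public static void main(String[] args) {
        // Assumption: spark.sql.codegen.logging.maxLines is the internal SQLConf
        // entry that truncates logged codegen output; -1 should disable the cap
        // so the whole generated.java listing is logged.
        SparkSession spark = SparkSession.builder()
                .appName("full-codegen-log")
                .config("spark.sql.codegen.logging.maxLines", "-1")
                .getOrCreate();

        // ... run the failing query here to capture the untruncated listing ...
    }
}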

Spark version: 2.3.0

Let me know if you require more details.

EDIT:

generated.java file (a few lines out of the 1000) from the Eclipse console:

18/07/18 16:48:30 INFO CodeGenerator:

/* 001 */ public java.lang.Object generate(Object[] references) {
/* 002 */   return new SpecificSafeProjection(references);
/* 003 */ }
/* 004 */
/* 005 */ class SpecificSafeProjection extends org.apache.spark.sql.catalyst.expressions.codegen.BaseProjection {
/* 006 */
/* 007 */   private Object[] references;
/* 008 */   private InternalRow mutableRow;
/* 009 */   private boolean resultIsNull;
/* 010 */   private long argValue;
/* 011 */   private boolean resultIsNull1;
/* 012 */   private long argValue1;
/* 013 */   private boolean resultIsNull2;
/* 014 */   private long argValue2;
/* 015 */   private boolean resultIsNull3;
/* 016 */   private long argValue3;
/* 017 */   private boolean resultIsNull4;
/* 018 */   private long argValue4;
/* 019 */   private boolean resultIsNull5;
/* 020 */   private long argValue5;
/* 021 */   private boolean resultIsNull6;
/* 022 */   private long argValue6;
/* 023 */   private boolean resultIsNull7;
/* 024 */   private long argValue7;
/* 025 */   private boolean resultIsNull8;
/* 026 */   private long argValue8;
/* 027 */   private boolean resultIsNull9;
/* 028 */   private long argValue9;
/* 029 */
/* 030 */   public SpecificSafeProjection(Object[] references) {
/* 031 */     this.references = references;
/* 032 */     mutableRow = (InternalRow) references[references.length - 1];
/* 033 */
/* 034 */
/* 035 */   }
/* 036 */
/* 037 */   public void initialize(int partitionIndex) {
/* 038 */
/* 039 */   }
/* 040 */
/* 041 */   public java.lang.Object apply(java.lang.Object _i) {
/* 042 */     InternalRow i = (InternalRow) _i;
/* 043 */
/* 044 */     com.sample.RuleParamsBean value274 = InitializeJavaBean(i);
/* 045 */     if (false) {
/* 046 */       mutableRow.setNullAt(0);
/* 047 */     } else {
/* 048 */
/* 049 */       mutableRow.update(0, value274);
/* 050 */     }
/* 051 */
/* 052 */     return mutableRow;
/* 053 */   }
/* 054 */
/* 055 */
/* 056 */   private void initializeJavaBean_4(InternalRow i, com.sample.RuleParamsBean javaBean) {
/* 057 */
/* 058 */
/* 059 */     boolean isNull20 = i.isNullAt(15);
/* 060 */     Decimal value20 = isNull20 ? null : (i.getDecimal(15, 5, 0));
/* 061 */     boolean isNull18 = false;
/* 062 */     org.apache.spark.sql.types.Decimal value18 = null;
/* 063 */     if (!false && isNull20) {
/* 064 */
/* 065 */       final org.apache.spark.sql.types.Decimal value21 = null;
/* 066 */       isNull18 = true;
/* 067 */       value18 = value21;
/* 068 */     } else {
/* 069 */
/* 070 */       final org.apache.spark.sql.types.Decimal value23 = false ? null : new org.apache.spark.sql.types.Decimal();
/* 071 */       org.apache.spark.sql.types.Decimal javaBean1 = value23;
/* 072 */       if (!false) {
/* 073 */
/* 074 */       }
/* 075 */       isNull18 = false;
/* 076 */       value18 = value23;
/* 077 */     }
/* 078 */     javaBean.setMESSAGE_NBR(value18);
/* 079 */
/* 080 */
/* 081 */     boolean isNull25 = i.isNullAt(86);
/* 082 */     UTF8String value25 = isNull25 ? null : (i.getUTF8String(86));
/* 083 */     boolean isNull24 = true;
/* 084 */     java.lang.String value24 = null;
/* 085 */     if (!isNull25) {
/* 086 */
/* 087 */       isNull24 = false;
/* 088 */       if (!isNull24) {
/* 089 */
/* 090 */         Object funcResult8 = null;
/* 091 */         funcResult8 = value25.toString();
  • You may be interested in having a look here: https://stackoverflow.com/questions/48026060/spark-csv-no-applicable-constructor-method-found-for-actual-parameters – stefanobaghino Jul 18 '18 at 13:07
  • @stefanobaghino It is similar, but in my case I am not using the inferSchema option. – Raj Jul 19 '18 at 04:00
  • @stefanobaghino I am loading the data from a database table. – Raj Jul 19 '18 at 04:52

1 Answer

In my typed Java bean class I had a member of type java.sql.Date, which was causing the issue. I changed it to java.sql.Timestamp and it worked fine. (This matches the exception above: the generated code was passing a long where DateTimeUtils.toJavaDate(int) expects an int.)

Reference: http://mail-archives.apache.org/mod_mbox/spark-issues/201801.mbox/%3CJIRA.13127872.1514830615000.556367.1514906160358@Atlassian.JIRA%3E
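
For illustration, a hypothetical excerpt of such a bean; the field name is invented, and only the type change from java.sql.Date to java.sql.Timestamp matters.

import java.io.Serializable;
import java.sql.Timestamp;

public class RuleParamsBean implements Serializable {

    // Before (triggered the codegen failure):
    // private java.sql.Date processDate;

    // After:
    private Timestamp processDate;   // hypothetical field name

    public Timestamp getProcessDate() { return processDate; }

    public void setProcessDate(Timestamp processDate) { this.processDate = processDate; }
}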
