I have joined 2 dataframes and am now trying to produce a report comprising columns from both of my data frames. I tried using .select(cols: String*) but it is not working.
Also, the method described here doesn't seem to solve my issue.
Below is the code. val full_report is where I need to get the columns.
import org.apache.spark.sql.types._
object read_data {
  /** Entry point: reads two CSV files, joins them on country_id, and prints the combined report. */
  def main(args: Array[String]): Unit = {
    val spark = org.apache.spark.sql.SparkSession.builder
      .master("local")
      .appName("Spark CSV Reader")
      .getOrCreate()

    // Explicit schema because the orders file has no header row.
    val customSchema = StructType(Array(
      StructField("order_id", IntegerType, true),
      StructField("parent_order_uuid", StringType, true),
      StructField("company", StringType, true),
      StructField("country_id", IntegerType, true)))

    val readogp = spark.read.format("csv")
      .option("header", "false")
      .schema(customSchema)
      .load("/home/cloudera/Desktop/ogp_csv.csv")
    readogp.show()

    val read_country = spark.read.format("csv")
      .option("header", "true")
      .load("/home/cloudera/Desktop/country.csv")
    read_country.show()

    println("************************************************************************")

    // BUG FIX: the original wrote .select(readogp.select("order_id" + "parent_order_id")).
    // That (a) concatenates the strings into the single bogus column name
    // "order_idparent_order_id", (b) passes a DataFrame where select expects columns,
    // and (c) references parent_order_id although the schema declares parent_order_uuid.
    // Pass Column objects qualified by their source frame instead; qualification also
    // disambiguates country_id, which appears in both sides of the join.
    val full_report = readogp
      .join(read_country, readogp.col("country_id") === read_country.col("country_id"))
      .select(
        readogp.col("order_id"),
        readogp.col("parent_order_uuid"),
        readogp.col("company"),
        read_country.col("*")) // every column of the country lookup — trim as needed

    // BUG FIX: the original called full.show(), but the val is named full_report.
    full_report.show()
  }
}
Please let me know how I can overcome this hurdle.