There are several ways to add a new column to a DataFrame:
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([[1, 2], [3, 4]], ['col1', 'col2'])
df.show()
+----+----+
|col1|col2|
+----+----+
| 1| 2|
| 3| 4|
+----+----+
-- Using the method withColumn:
import pyspark.sql.functions as F
df.withColumn('col3', F.col('col2') - F.col('col1')) # col function
df.withColumn('col3', df['col2'] - df['col1']) # bracket notation
df.withColumn('col3', df.col2 - df.col1) # dot notation
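Note that DataFrames are immutable: withColumn does not modify df but returns a new DataFrame, so assign the result if you want to keep it (the name new_df below is arbitrary):
new_df = df.withColumn('col3', F.col('col2') - F.col('col1'))
new_df.show()  # df itself still has only col1 and col2
On Spark 3.3+ there is also withColumns, which takes a dict and adds several columns in one pass:
df.withColumns({'col3': F.col('col2') - F.col('col1')})  # requires Spark 3.3 or later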
-- Using the method select:
df.select('*', (F.col('col2') - F.col('col1')).alias('col3'))
The expression '*' selects all existing columns.
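Since select accepts any number of expressions, several new columns can be derived in one call; a minimal sketch (the aliases diff and total are purely illustrative):
df.select(
    '*',
    (F.col('col2') - F.col('col1')).alias('diff'),
    (F.col('col2') + F.col('col1')).alias('total'),
)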
-- Using the method selectExpr:
df.selectExpr('*', 'col2 - col1 as col3')
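selectExpr is shorthand for select with SQL expression strings; the same column can be built with F.expr if you want to mix SQL snippets and Column expressions:
df.select('*', F.expr('col2 - col1').alias('col3'))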
-- Using SQL:
df.createOrReplaceTempView('df_view')  # register df as a temporary SQL view
spark.sql('select *, col2 - col1 as col3 from df_view')
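spark.sql returns a DataFrame like the other methods, so its result can be assigned and displayed as usual (the name result is arbitrary):
result = spark.sql('select *, col2 - col1 as col3 from df_view')
result.show()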
All four approaches produce the same result:
+----+----+----+
|col1|col2|col3|
+----+----+----+
| 1| 2| 1|
| 3| 4| 1|
+----+----+----+
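As a side note, if the new column should hold a constant rather than a value derived from other columns, F.lit works with any of the approaches above:
df.withColumn('col4', F.lit(0))  # adds a column of zeros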