
I am parsing Azure Event Hub Avro messages. The last column is an array, and I am trying to flatten it.

Before:

{"records":[{"time":"2020-01-28T04:50:20.0975886Z","resourceId":"/SUBSCRIPTIONS/xxxxxxxxxxxx/RESOURCEGROUPS/xxxxx-xxxxxxxI/PROVIDERS/MICROSOFT.COMPUTE/DISKS/7C3E07DE8xxxxxxx-0-SCRATCHVOLUME","operationName":"MICROSOFT.COMPUTE/DISKS/DELETE","category":"Administrative","resultType":"Start","resultSignature":"Started.","durationMs":"0","callerIpAddress":"43.121.152.99","correlationId":"xxxxxxx"},{"time":"2020-01-28T04:50:20.1122888Z","resourceId":"/SUBSCRIPTIONS/xxxxxxxxxxxx/RESOURCEGROUPS/xxxxx-xxxxxxxI/PROVIDERS/MICROSOFT.COMPUTE/DISKS/7C3E07DE8xxxxxxx-0-SCRATCHVOLUME","operationName":"MICROSOFT.COMPUTE/DISKS/DELETE","category":"Administrative","resultType":"Success","resultSignature":"Succeeded.NoContent","durationMs":"14","callerIpAddress":"43.121.152.99","correlationId":"xxxxxxx"}]}

This is what I have come up with, and I think I am very close. I got the struct parsed and I am able to strip the outer "records" key, but I am unable to handle the array inside it.

from pyspark.sql.types import StringType, StructType, StructField, ArrayType
from pyspark.sql.functions import from_json, col, explode

# read the Avro file into a DataFrame
df = spark.read.format("avro").load("/mnt/test/xxxxxx/xxxxxxxx/31.avro")

# cast the binary column (Body) to a string
df = df.withColumn("Body", col("Body").cast("string"))

sourceSchema = StructType([
        StructField("records", ArrayType(
            StructType([
                StructField("time", StringType(), True),
                StructField("resourceId", StringType(), True),
                StructField("operationName", StringType(), True),
                StructField("category", StringType(), True),
                StructField("resultType", StringType(), True),
                StructField("resultSignature", StringType(), True),
                StructField("durationMs", StringType(), True),
                StructField("callerIpAddress", StringType(), True),
                StructField("correlationId", StringType(), True)
            ])
        ), True)
    ])

df = df.withColumn("Body", from_json(df.Body, sourceSchema))

# Pull each top-level field of Body into its own column
# (here that is only "records", which is still an array of structs)
df2 = df
for c in df.schema['Body'].dataType:
    df2 = df2.withColumn(c.name, col("Body." + c.name))
display(df2)

After:

[{"time":"2020-01-28T04:50:20.0975886Z","resourceId":"/SUBSCRIPTIONS/xxxxxxxxxxxx/RESOURCEGROUPS/xxxxx-xxxxxxxI/PROVIDERS/MICROSOFT.COMPUTE/DISKS/7C3E07DE8xxxxxxx-0-SCRATCHVOLUME","operationName":"MICROSOFT.COMPUTE/DISKS/DELETE","category":"Administrative","resultType":"Start","resultSignature":"Started.","durationMs":"0","callerIpAddress":"43.121.152.99","correlationId":"xxxxxxx"},{"time":"2020-01-28T04:50:20.1122888Z","resourceId":"/SUBSCRIPTIONS/xxxxxxxxxxxx/RESOURCEGROUPS/xxxxx-xxxxxxxI/PROVIDERS/MICROSOFT.COMPUTE/DISKS/7C3E07DE8xxxxxxx-0-SCRATCHVOLUME","operationName":"MICROSOFT.COMPUTE/DISKS/DELETE","category":"Administrative","resultType":"Success","resultSignature":"Succeeded.NoContent","durationMs":"14","callerIpAddress":"43.121.152.99","correlationId":"xxxxxxx"}]
tomarv2
  • It's unclear which result you are expecting to get; maybe add an example by manually flattening part of the string – oshi2016 Jan 30 '20 at 04:00
  • I need each entry to go into a separate column, like 'time' in one, 'resourceId' in another, and so on. – tomarv2 Jan 30 '20 at 04:13
  • does [this](https://stackoverflow.com/questions/45967562/how-to-use-from-json-with-schema-as-string-i-e-a-json-encoded-schema) answer your query? – samkart Jan 30 '20 at 04:31

2 Answers


Maybe try this:

import pandas as pd  # pandas >= 1.0 exposes json_normalize as pd.json_normalize
s = {"records":[{"time":"2020-01-28T04:50:20.0975886Z","resourceId":"/SUBSCRIPTIONS/xxxxxxxxxxxx/RESOURCEGROUPS/xxxxx-xxxxxxxI/PROVIDERS/MICROSOFT.COMPUTE/DISKS/7C3E07DE8xxxxxxx-0-SCRATCHVOLUME","operationName":"MICROSOFT.COMPUTE/DISKS/DELETE","category":"Administrative","resultType":"Start","resultSignature":"Started.","durationMs":"0","callerIpAddress":"43.121.152.99","correlationId":"xxxxxxx"},{"time":"2020-01-28T04:50:20.1122888Z","resourceId":"/SUBSCRIPTIONS/xxxxxxxxxxxx/RESOURCEGROUPS/xxxxx-xxxxxxxI/PROVIDERS/MICROSOFT.COMPUTE/DISKS/7C3E07DE8xxxxxxx-0-SCRATCHVOLUME","operationName":"MICROSOFT.COMPUTE/DISKS/DELETE","category":"Administrative","resultType":"Success","resultSignature":"Succeeded.NoContent","durationMs":"14","callerIpAddress":"43.121.152.99","correlationId":"xxxxxxx"}]}
pd.json_normalize(s).values

The result you'll get is:

array([[list([{'time': '2020-01-28T04:50:20.0975886Z', 'resourceId': '/SUBSCRIPTIONS/xxxxxxxxxxxx/RESOURCEGROUPS/xxxxx-xxxxxxxI/PROVIDERS/MICROSOFT.COMPUTE/DISKS/7C3E07DE8xxxxxxx-0-SCRATCHVOLUME', 'operationName': 'MICROSOFT.COMPUTE/DISKS/DELETE', 'category': 'Administrative', 'resultType': 'Start', 'resultSignature': 'Started.', 'durationMs': '0', 'callerIpAddress': '43.121.152.99', 'correlationId': 'xxxxxxx'}, {'time': '2020-01-28T04:50:20.1122888Z', 'resourceId': '/SUBSCRIPTIONS/xxxxxxxxxxxx/RESOURCEGROUPS/xxxxx-xxxxxxxI/PROVIDERS/MICROSOFT.COMPUTE/DISKS/7C3E07DE8xxxxxxx-0-SCRATCHVOLUME', 'operationName': 'MICROSOFT.COMPUTE/DISKS/DELETE', 'category': 'Administrative', 'resultType': 'Success', 'resultSignature': 'Succeeded.NoContent', 'durationMs': '14', 'callerIpAddress': '43.121.152.99', 'correlationId': 'xxxxxxx'}])]],
  dtype=object)
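
If the goal is one column per field, json_normalize can also expand the list directly via its record_path parameter. A minimal sketch, reusing the same s as above:

# record_path expands each element of "records" into its own row,
# with one column per field
flat = pd.json_normalize(s, record_path="records")
flat.columns.tolist()
# ['time', 'resourceId', 'operationName', 'category', 'resultType',
#  'resultSignature', 'durationMs', 'callerIpAddress', 'correlationId']

From there, flat can be handed back to Spark with spark.createDataFrame(flat) if a Spark DataFrame is needed.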
oshi2016

I see a lot of people have this question; hope this helps.

from pyspark.sql import functions as F
from pyspark.sql.types import StringType, StructType, StructField, ArrayType

# Read Event Hub's stream
# (if reading from a file instead, supported formats are text, csv, json, orc, parquet)

conf = {}
conf["eventhubs.connectionString"] = "Endpoint=sb://xxxxxxxxxxxx.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=jxxxxxxxxxxxx/xxxxxxxxxxxx=;EntityPath=eventhub"

# define dataframe for reading stream
read_df = (
  spark
    .readStream
    .format("eventhubs")
    .options(**conf)
    .option('multiLine', True)
    .option('mode', 'PERMISSIVE')
    .load()
)

# define the schema used to parse the JSON body
sourceSchema = StructType([
        StructField("records", ArrayType(
            StructType([
                StructField("time", StringType(), True),
                StructField("resourceId", StringType(), True),
                StructField("operationName", StringType(), True),
                StructField("category", StringType(), True),
                StructField("resultType", StringType(), True),
                StructField("resultSignature", StringType(), True),
                StructField("durationMs", StringType(), True),
                StructField("callerIpAddress", StringType(), True),
                StructField("correlationId", StringType(), True)
            ])
        ), True)
    ])

# cast the binary body to a string and parse the JSON with the schema
decoded_df = read_df.select(F.from_json(F.col("body").cast("string"), sourceSchema).alias("payload"))

# write the stream to an in-memory table for inspection
query1 = (
  decoded_df
    .writeStream
    .format("memory")
    .queryName("read_hub")
    .start()
)
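
Note that payload.records is still an array of structs after from_json. A minimal sketch of the remaining flatten step, assuming the decoded_df defined above:

# explode() produces one row per element of the records array;
# selecting "record.*" then promotes each struct field to its own column
flat_df = (
  decoded_df
    .select(F.explode(F.col("payload.records")).alias("record"))
    .select("record.*")
)

The same explode-then-"record.*" pattern applies to the batch DataFrame in the question.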
tomarv2