- This answer is about 8x faster than the other solution (31.8 s vs. 4 min 28 s on the 1,000,000-row test frame below).
- The other implementation works, but it uses `.apply` twice and a dict comprehension, which are slow compared to vectorized methods (a vectorized variant of that approach is sketched at the end of this answer).
**Explanation**
- `.apply(literal_eval)` converts the `'activity'` column from strings to Python literals (e.g. lists of dicts; `'[{"name":"STILL","conf":100}]'` → `[{"name":"STILL","conf":100}]`).
- `.explode` separates the dicts in each list into separate rows.
- Extract the keys and values in the `'activity'` column into separate columns, then `.join` the columns back to `df` (a short sketch of the intermediate output of each step follows this list).
  - The timing analysis in this answer shows that the fastest way to extract a column of single-level dicts to a dataframe is with `pd.DataFrame(df.pop('activity').values.tolist())`.
- `.pivot` the dataframe into a wide format.
- Change `dfp.columns.name` from `'name'` to `None`; this is cosmetic and can be removed.
- This was tested with `pandas 1.2.0`.
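Before the full function, here is a minimal sketch of the intermediate frames each step produces, run on a small hand-made frame (assumption: the names `toy`, `exploded`, `flat`, and `wide` are purely illustrative and not part of the answer's code):

```python
import pandas as pd
from ast import literal_eval

toy = pd.DataFrame({
    'id': [4, 4],
    'time': [1596213715048, 1596213755797],
    'activity': ['[{"name":"STILL","conf":100}]',
                 '[{"name":"STILL","conf":97},{"name":"UNKNOWN","conf":2},{"name":"IN_VEHICLE","conf":1}]'],
})

# 1) strings -> python objects, then one dict per row
toy.activity = toy.activity.apply(literal_eval)
exploded = toy.explode('activity', ignore_index=True)
#    id           time                           activity
# 0   4  1596213715048     {'name': 'STILL', 'conf': 100}
# 1   4  1596213755797      {'name': 'STILL', 'conf': 97}
# 2   4  1596213755797     {'name': 'UNKNOWN', 'conf': 2}
# 3   4  1596213755797  {'name': 'IN_VEHICLE', 'conf': 1}

# 2) dict keys/values -> columns, joined back on
flat = exploded.join(pd.DataFrame(exploded.pop('activity').values.tolist()))
#    id           time        name  conf
# 0   4  1596213715048       STILL   100
# 1   4  1596213755797       STILL    97
# 2   4  1596213755797     UNKNOWN     2
# 3   4  1596213755797  IN_VEHICLE     1

# 3) long -> wide (column order is alphabetical)
wide = flat.pivot(index=['id', 'time'], columns='name', values='conf').fillna(0).astype(int)
# name              IN_VEHICLE  STILL  UNKNOWN
# id time
# 4  1596213715048           0    100        0
#    1596213755797           1     97        2
```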
import pandas as pd
from ast import literal_eval
# test data
data = {'id': [4, 4, 4, 6, 6, 8, 9], 'time': [1596213715048, 1596213739171, 1596213755797, 1596214842817, 1596214931090, 1596214957246, 1596215304418], 'activity': ['[{"name":"STILL","conf":100}]', '[{"name":"STILL","conf":54},{"name":"ON_FOOT","conf":19},{"name":"WALKING","conf":19},{"name":"ON_BICYCLE","conf":9},{"name":"IN_VEHICLE","conf":8},{"name":"UNKNOWN","conf":3}]', '[{"name":"STILL","conf":97},{"name":"UNKNOWN","conf":2},{"name":"IN_VEHICLE","conf":1}]', '[{"name":"STILL","conf":100}]', '[{"name":"STILL","conf":34},{"name":"IN_VEHICLE","conf":28},{"name":"ON_FOOT","conf":15},{"name":"WALKING","conf":15},{"name":"ON_BICYCLE","conf":8},{"name":"UNKNOWN","conf":3}]', '[{"name":"STILL","conf":100}]', '[{"name":"STILL","conf":100}]']}
df = pd.DataFrame(data)
# function to transform column of strings
def test(df):
    df.activity = df.activity.apply(literal_eval)
    df = df.explode('activity', ignore_index=True)
    df = df.join(pd.DataFrame(df.pop('activity').values.tolist()))
    dfp = df.pivot(index=['id', 'time'], columns='name', values='conf').fillna(0).astype(int).reset_index()
    dfp.columns.rename(None, inplace=True)
    return dfp
# call the function
test(df)
# result
id time IN_VEHICLE ON_BICYCLE ON_FOOT STILL UNKNOWN WALKING
0 4 1596213715048 0 0 0 100 0 0
1 4 1596213739171 8 9 19 54 3 19
2 4 1596213755797 1 0 0 97 2 0
3 6 1596214842817 0 0 0 100 0 0
4 6 1596214931090 28 8 15 34 3 15
5 8 1596214957246 0 0 0 100 0 0
6 9 1596215304418 0 0 0 100 0 0
**`%%timeit` testing**
import numpy as np
import random
import pandas as pd
import json
from ast import literal_eval
# test data with 1,000,000 rows
np.random.seed(365)
random.seed(365)
rows = 1000000
activity = ['[{"name":"STILL","conf":100}]', '[{"name":"STILL","conf":54},{"name":"ON_FOOT","conf":19},{"name":"WALKING","conf":19},{"name":"ON_BICYCLE","conf":9},{"name":"IN_VEHICLE","conf":8},{"name":"UNKNOWN","conf":3}]', '[{"name":"STILL","conf":97},{"name":"UNKNOWN","conf":2},{"name":"IN_VEHICLE","conf":1}]', '[{"name":"STILL","conf":100}]', '[{"name":"STILL","conf":34},{"name":"IN_VEHICLE","conf":28},{"name":"ON_FOOT","conf":15},{"name":"WALKING","conf":15},{"name":"ON_BICYCLE","conf":8},{"name":"UNKNOWN","conf":3}]', '[{"name":"STILL","conf":100}]', '[{"name":"STILL","conf":100}]']
data = {'time': pd.bdate_range('2021-01-15', freq='s', periods=rows),
        'id': np.random.randint(10, size=(rows)),
        'activity': [random.choice(activity) for _ in range(rows)]}
df = pd.DataFrame(data)
# test the function in this answer
%%timeit -r1 -n1 -q -o
test(df)
[out]:
<TimeitResult : 31.8 s ± 0 ns per loop (mean ± std. dev. of 1 run, 1 loop each)>
# test the implementation from the other answer
def flatten_json_to_dict(s):
    return {obj['name']: obj['conf'] for obj in json.loads(s)}

def nick(df):
    expanded = df['activity'].apply(flatten_json_to_dict).apply(pd.Series)
    df = df.join(expanded)
    df = df.drop('activity', axis=1)
    df = df.fillna(0)
    return df
%%timeit -r1 -n1 -q -o
nick(df)
[out]:
<TimeitResult : 4min 28s ± 0 ns per loop (mean ± std. dev. of 1 run, 1 loop each)>
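Much of that 4 min 28 s is likely spent in the second `.apply(pd.Series)` call, which builds a Series object for every row. As a hedged sketch of the vectorized point above (the name `nick_vectorized` is hypothetical and reuses `flatten_json_to_dict` from the code above; it is not part of either answer), the same flattening can feed a single `pd.DataFrame` constructor instead:

```python
def nick_vectorized(df):
    # hypothetical variant: identical row-wise flattening, but the resulting
    # dicts are expanded to columns with one DataFrame constructor
    # instead of .apply(pd.Series)
    flattened = df['activity'].apply(flatten_json_to_dict)
    expanded = pd.DataFrame(flattened.tolist(), index=df.index)
    return df.drop(columns='activity').join(expanded).fillna(0)
```

The `.astype(int)` conversion and wide-format details from `test` are intentionally omitted; this only illustrates where the vectorized speedup comes from.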