I reviewed the get_dummies source code briefly, and I think it may not be taking full advantage of the sparsity for your use case. The following approach may be faster, but I did not attempt to scale it all the way up to the 19M records you have:
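For reference, the stock approach this is meant to beat is a plain get_dummies call; a minimal sketch, using the dfa frame constructed below (dfdum is just an illustrative name, and sparse=True is get_dummies' built-in sparse-output flag):

# stock pandas encoder, for comparison against the sparse approach below
dfdum = pd.get_dummies(dfa.col1, prefix='col1', sparse=True)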
import numpy as np
import pandas as pd
import scipy.sparse as ssp
np.random.seed(1)
N = 10000
dfa = pd.DataFrame.from_dict({
    'col1': np.random.randint(0, 27000, N),
    'col2b': np.random.choice([1, 2, 3], N),
    'target': np.random.choice([1, 2, 3], N),
})
# construct an array of the unique values of the column to be encoded
vals = np.array(dfa.col1.unique())
# extract an array of values to be encoded from the dataframe
col1 = dfa.col1.values
# construct a sparse matrix of the appropriate size and an appropriate,
# memory-efficient dtype
spmtx = ssp.dok_matrix((N, len(vals)), dtype=np.uint8)
# do the encoding. NB: This is only vectorized in one of the two dimensions.
# Finding a way to vectorize the second dimension may yield a large speed up
for idx, val in enumerate(vals):
    spmtx[np.argwhere(col1 == val), idx] = 1
# Construct a SparseDataFrame from the sparse matrix and apply the index
# from the original dataframe and column names.
dfnew = pd.SparseDataFrame(spmtx, index=dfa.index,
                           columns=['col1_' + str(el) for el in vals])
dfnew.fillna(0, inplace=True)
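As a quick sanity check (a sketch that assumes the code above has already run), every row of the encoding should contain exactly one nonzero entry, and there should be one column per unique value:

# every row encodes exactly one value; one column per unique category
assert (spmtx.sum(axis=1) == 1).all()
assert spmtx.shape == (N, dfa.col1.nunique())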
UPDATE
Borrowing insights from other answers here and here, I was able to vectorize the solution in both dimensions. In my limited testing, constructing the SparseDataFrame appears to increase the execution time several fold, so if you don't need to return a DataFrame-like object, you can save a lot of time. This solution also handles the case where you need to encode two or more DataFrames into 2-d arrays with equal numbers of columns.
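The heart of the vectorized version is a broadcast comparison between a column vector of the values to encode and a row vector of the category vocabulary; here is a minimal sketch of that trick on toy data (the variable names are illustrative only):

# toy illustration of the broadcast trick used in sparse_ohe below
a = np.array([5, 7, 5]).reshape(-1, 1)     # values to encode, as a column
cats = np.array([5, 6, 7]).reshape(1, -1)  # category vocabulary, as a row
rows, cols = np.where(a == cats)           # rows -> [0, 1, 2], cols -> [0, 2, 0]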
import numpy as np
import pandas as pd
import scipy.sparse as ssp
np.random.seed(1)
N1 = 10000
N2 = 100000
dfa = pd.DataFrame.from_dict({
    'col1': np.random.randint(0, 27000, N1),
    'col2a': np.random.choice([1, 2, 3], N1),
    'target': np.random.choice([1, 2, 3], N1),
})
dfb = pd.DataFrame.from_dict({
    'col1': np.random.randint(0, 27000, N2),
    'col2b': np.random.choice(['foo', 'bar', 'baz'], N2),
    'target': np.random.choice([1, 2, 3], N2),
})
# construct an array of the unique values of the column to be encoded
# taking the union of the values from both dataframes.
valsa = set(dfa.col1.unique())
valsb = set(dfb.col1.unique())
vals = np.array(list(valsa.union(valsb)), dtype=np.uint16)
def sparse_ohe(df, col, vals):
    """One-hot encoder using a scipy sparse matrix."""
    colaray = df[col].values
    # construct a sparse matrix of the appropriate size and an appropriate,
    # memory-efficient dtype
    spmtx = ssp.dok_matrix((df.shape[0], vals.shape[0]), dtype=np.uint8)
    # do the encoding: a broadcast comparison yields the (row, col) pairs to set
    spmtx[np.where(colaray.reshape(-1, 1) == vals.reshape(1, -1))] = 1
    # construct a SparseDataFrame from the sparse matrix
    dfnew = pd.SparseDataFrame(spmtx, dtype=np.uint8, index=df.index,
                               columns=[col + '_' + str(el) for el in vals])
    dfnew.fillna(0, inplace=True)
    return dfnew
dfanew = sparse_ohe(dfa, 'col1', vals)
dfbnew = sparse_ohe(dfb, 'col1', vals)
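Finally, if you don't need the DataFrame wrapper at all, a variant that stops at the sparse matrix sidesteps the SparseDataFrame construction cost noted above. sparse_ohe_matrix and the timing harness below are only a sketch, not part of the original solution:

import time

def sparse_ohe_matrix(df, col, vals):
    """Variant of sparse_ohe that returns a scipy sparse matrix directly."""
    colaray = df[col].values
    spmtx = ssp.dok_matrix((df.shape[0], vals.shape[0]), dtype=np.uint8)
    spmtx[np.where(colaray.reshape(-1, 1) == vals.reshape(1, -1))] = 1
    # CSR is compact and supports fast arithmetic, slicing, and stacking
    return spmtx.tocsr()

t0 = time.perf_counter()
mtxb = sparse_ohe_matrix(dfb, 'col1', vals)
t1 = time.perf_counter()
_ = sparse_ohe(dfb, 'col1', vals)
t2 = time.perf_counter()
print('matrix only: %.2fs, with SparseDataFrame: %.2fs' % (t1 - t0, t2 - t1))

# both encodings draw their columns from the shared vals array,
# so they align and can be stacked directly
combined = ssp.vstack([sparse_ohe_matrix(dfa, 'col1', vals), mtxb])

Because every call uses the same vals vocabulary, the column order is identical across frames, which is what makes the vstack safe.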