I am trying to find the fastest way to count the number of unique values in a huge table, where the number of rows can easily be anywhere from 100 million to 10 billion. In this particular case, I am dealing with 128-bit integers.
I'm trying to understand why the pandas approach achieves a better result (tested with 1 million rows), since it appears to operate on whole columns at a time, which feels inefficient. How should this be implemented in C++? My initial attempt at a C++ version was extremely slow (slower than the Python one). I used std::set, std::pair and std::map.
The first attempt looks like this:
import time
from collections import defaultdict as ddict

import pandas as pd

df = pd.DataFrame([])  # Load table with two columns containing 128-bit integers.

class Timer:
    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, *args):
        self.end = time.time()
        self.interval = self.end - self.start
        print("time elapsed:", self.interval)

with Timer():
    print(df['left'].nunique())
    print(df['right'].nunique())
    left_grp = df.groupby('left')
    print(left_grp['right'].nunique().max())
    right_grp = df.groupby('right')
    print(right_grp['left'].nunique().max())
Below is a pure Python example that walks the array row by row, which to my understanding should be more efficient. Yet it turns out to be about 3 times slower than the pandas version.
with Timer():
    uniques1 = set()
    uniques2 = set()
    uniques3 = ddict(set)
    uniques4 = ddict(set)
    # ndarray is a NumPy structured array with 'left' and 'right' fields
    # (same data as df above).
    for i in range(len(ndarray)):
        uniques1.add(ndarray[i]['left'])
        uniques2.add(ndarray[i]['right'])
        uniques3[ndarray[i]['left']].add(ndarray[i]['right'])
        uniques4[ndarray[i]['right']].add(ndarray[i]['left'])
    print(len(uniques1))
    print(len(uniques2))
    print(max(len(v) for v in uniques3.values()))
    print(max(len(v) for v in uniques4.values()))
Any advice on how to implement the above pure Python code efficiently in C++? My C++ attempt is below.
#include <stdint.h>
#include <cstdio>
#include <algorithm>
#include <map>
#include <set>
#include <utility>

typedef std::pair<uint64_t, uint64_t> uint128_t;
typedef std::set<uint128_t> set128_t;
typedef std::map<uint128_t, set128_t> map128_t;

namespace nunique_highperf {

// Largest per-key set size in the map.
int get_max(const map128_t& map) {
    int best = 0;
    auto it = map.begin();
    while (it != map.end()) {
        best = std::max(best, (int)it->second.size());
        it++;
    }
    return best;
}

// Mimics Python's defaultdict(set): make sure the key exists, then add
// 'right' to its set. Note this copies the set out of the map and back.
void default_update(map128_t &map, uint128_t left, uint128_t right) {
    set128_t temp;
    map.emplace(left, temp);
    temp = map[left];
    temp.insert(right);
    map[left] = temp;
}

void uniques_from_table(uint64_t **sessions, int rows) {
    set128_t uniques1;
    set128_t uniques2;
    map128_t uniques3;
    map128_t uniques4;

    for (int i = 0; i < rows; i++) {
        // Each row stores two 128-bit values as four uint64_t halves.
        uint128_t left = std::make_pair(sessions[i][0], sessions[i][1]);
        uint128_t right = std::make_pair(sessions[i][2], sessions[i][3]);

        uniques1.insert(left);
        uniques2.insert(right);

        default_update(uniques3, left, right);
        default_update(uniques4, right, left);
    }

    printf("%zu\n", uniques1.size());
    printf("%zu\n", uniques2.size());
    printf("%d\n", get_max(uniques3));
    printf("%d\n", get_max(uniques4));
}

}  // namespace nunique_highperf
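For reference, here is a minimal driver that makes the snippet compilable and runnable end to end; the row data below is just placeholder values, not my real input:

int main() {
    // Three placeholder rows; each row holds the two uint64_t halves of
    // 'left' followed by the two halves of 'right'.
    uint64_t row0[4] = {1, 2, 3, 4};
    uint64_t row1[4] = {1, 2, 5, 6};
    uint64_t row2[4] = {7, 8, 3, 4};
    uint64_t *rows[3] = {row0, row1, row2};

    nunique_highperf::uniques_from_table(rows, 3);
    return 0;
}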
In the actual implementation there will be more than the two columns shown in the example, so I'm not just asking for the fastest way to count distinct values in a single column, but across multiple columns as well as column pairs.
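One direction I've been wondering about, but haven't benchmarked, is replacing the ordered std::set/std::map with hash-based containers and inserting into the per-key sets in place instead of copying them. A rough sketch of what I mean is below; the hash mixer for the 128-bit pairs is just a placeholder, so I don't know whether this is the right approach at the scale described above:

#include <cstdint>
#include <cstdio>
#include <algorithm>
#include <unordered_map>
#include <unordered_set>
#include <utility>

using u128 = std::pair<uint64_t, uint64_t>;

// Placeholder hash for the 128-bit pairs; a stronger mixer may be needed
// to keep collisions low on real data.
struct U128Hash {
    size_t operator()(const u128& v) const noexcept {
        uint64_t h = v.first * 0x9E3779B97F4A7C15ull;
        h ^= v.second + 0x9E3779B97F4A7C15ull + (h << 6) + (h >> 2);
        return static_cast<size_t>(h);
    }
};

using hset128_t = std::unordered_set<u128, U128Hash>;
using hmap128_t = std::unordered_map<u128, hset128_t, U128Hash>;

void uniques_from_table_hashed(uint64_t **sessions, int rows) {
    hset128_t uniques1, uniques2;
    hmap128_t uniques3, uniques4;

    for (int i = 0; i < rows; i++) {
        u128 left{sessions[i][0], sessions[i][1]};
        u128 right{sessions[i][2], sessions[i][3]};

        uniques1.insert(left);
        uniques2.insert(right);

        // operator[] default-constructs the per-key set in place, so no
        // set is copied out of the map and back.
        uniques3[left].insert(right);
        uniques4[right].insert(left);
    }

    size_t best3 = 0, best4 = 0;
    for (const auto& kv : uniques3) best3 = std::max(best3, kv.second.size());
    for (const auto& kv : uniques4) best4 = std::max(best4, kv.second.size());

    printf("%zu\n%zu\n%zu\n%zu\n", uniques1.size(), uniques2.size(), best3, best4);
}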
EDIT: Added C++ code.