I'm trying to solve https://open.kattis.com/problems/rootedsubtrees, and part of the solution requires finding the distance between any two nodes of the tree. To do this, I use Lowest Common Ancestor (LCA) as a subroutine: my LCA code does a DFS to build an Euler tour of the tree, then answers queries with a sparse-table RMQ over the tour. Somehow, running this code on a line graph of 200000 nodes leads to a segmentation fault during the DFS part of the code.
#pragma GCC optimize("Ofast")
#pragma GCC target("sse,sse2,sse3,ssse3,sse4,popcnt,abm,mmx,avx,avx2,fma")
#include <bits/stdc++.h>
using namespace std;
typedef long long ll;
typedef vector<int> vi;
#define fast_cin()                    \
    ios_base::sync_with_stdio(false); \
    cin.tie(NULL);                    \
    cout.tie(NULL);
int n, q, idx;
vector<int> adjlist[200009];
vector<int> L, E, H; // depth at traversal index, node at traversal index, first traversal index of node
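// A hand-worked sketch of what these arrays hold (hypothetical 4-node
// example, assuming the traversal order of dfs below): with input edges
// (1, 2), (1, 3), (2, 4), the call dfs(0, 0) fills in
//   E = [0, 1, 3, 1, 0, 2, 0] (node at each tour step)
//   L = [0, 1, 2, 1, 0, 1, 0] (depth at each tour step)
//   H = [0, 1, 5, 2]          (first tour index of each node)
// so the LCA of 0-indexed nodes 3 and 2 is E[RMQ(H[3], H[2])] = E[RMQ(2, 5)] = E[4] = 0.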
void dfs(int cur, int depth) {
    cout << "dfs " << cur << " " << idx << endl;
    H[cur] = idx;
    E[idx] = cur;
    L[idx++] = depth;
    for (int &nxt : adjlist[cur]) {
        if (H[nxt] != -1) continue;
        dfs(nxt, depth + 1);
        E[idx] = cur; // backtrack to current node
        L[idx++] = depth;
    }
}
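// Note: on the line graph generated below, the recursion above never
// backtracks until the far end of the path is reached, so dfs() nests
// roughly n = 200000 stack frames (one per node along the path).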
class SparseTable { // OOP style
private:
    vi A, P2, L2;
    vector<vi> SpT; // the Sparse Table
public:
    SparseTable() {} // default constructor
    SparseTable(vi &initialA) { // pre-processing routine
        A = initialA;
        int n = (int)A.size();
        int L2_n = (int)log2(n) + 1;
        P2.assign(L2_n + 1, 0);        // L2_n + 1 slots: the loop below writes P2[L2_n]
        L2.assign((1 << L2_n) + 1, 0); // likewise, the loop below writes L2[1 << L2_n]
        for (int i = 0; i <= L2_n; ++i) {
            P2[i] = (1 << i); // to speed up 2^i
            L2[(1 << i)] = i; // to speed up log_2(i)
        }
        for (int i = 2; i < P2[L2_n]; ++i)
            if (L2[i] == 0) L2[i] = L2[i - 1]; // to fill in the blanks
        // the initialization phase
        SpT = vector<vi>(L2[n] + 1, vi(n));
        for (int j = 0; j < n; ++j) SpT[0][j] = j; // RMQ of sub array [j..j]
        // the two nested loops below have overall time complexity = O(n log n)
        for (int i = 1; P2[i] <= n; ++i)              // for all i s.t. 2^i <= n
            for (int j = 0; j + P2[i] - 1 < n; ++j) { // for all valid j
                int x = SpT[i - 1][j];                // [j..j+2^(i-1)-1]
                int y = SpT[i - 1][j + P2[i - 1]];    // [j+2^(i-1)..j+2^i-1]
                SpT[i][j] = A[x] <= A[y] ? x : y;
            }
    }
    int RMQ(int i, int j) {
        int k = L2[j - i + 1];         // 2^k <= (j-i+1)
        int x = SpT[k][i];             // covers [i..i+2^k-1]
        int y = SpT[k][j - P2[k] + 1]; // covers [j-2^k+1..j]
        return A[x] <= A[y] ? x : y;
    }
};
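// Usage sketch (values taken from the worked example above): with
// A = [0, 1, 2, 1, 0, 1, 0], SparseTable(A).RMQ(2, 5) returns 4, the
// *index* of the minimum value in A[2..5], not the value itself.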
int LCA(int u, int v, SparseTable &SpT) {
    if (H[u] > H[v]) swap(u, v);
    return E[SpT.RMQ(H[u], H[v])]; // shallowest node between the first visits of u and v
}
int APSP(int u, int v, SparseTable &SpT) {
    int ancestor = LCA(u, v, SpT);
    return L[H[u]] + L[H[v]] - 2 * L[H[ancestor]]; // dist(u,v) = depth(u) + depth(v) - 2 * depth(LCA)
}
int main() {
    fast_cin();
    cin >> n >> q;
    L.assign(2 * (n + 9), 0);
    E.assign(2 * (n + 9), 0);
    H.assign(n + 9, -1);
    idx = 0;
    int u, v;
    for (int i = 0; i < n - 1; i++) {
        cin >> u >> v;
        u--;
        v--;
        adjlist[u].emplace_back(v);
        adjlist[v].emplace_back(u);
    }
    dfs(0, 0);
    SparseTable SpT(L);
    ll d;
    while (q--) {
        cin >> u >> v;
        u--;
        v--;
        d = (ll)APSP(u, v, SpT) + 1;
        cout << (ll)n - d + d * (d + 1) / 2 << endl;
    }
    return 0;
}
Using the following Python code to generate the input of a large line graph (a path 1-2-...-200000, followed by the single query 1 200000),
n = 200000
q = 1
print(n, q)
for i in range(1, n):
    print(i, i + 1)
print(1, 200000)
I get the following last few lines of output before my program crashes:
.
.
.
dfs 174494 174494
dfs 174495 174495
dfs 174496 174496
dfs 174497 174497
dfs 174498 174498
Segmentation fault (core dumped)
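For reference, here is a minimal sketch of the kind of stand-alone test I would use to isolate raw recursion depth from the rest of the logic (a hypothetical program, not part of my solution), nesting roughly as many calls as the dfs does on this input:

#include <cstdio>

// Recurse as deep as the dfs does on the line graph, with a small local
// array so each frame occupies a comparable amount of stack space.
int rec(int depth) {
    volatile int pad[8] = {depth}; // volatile keeps the frame from being optimized out
    if (depth >= 200000) return pad[0];
    return rec(depth + 1) + pad[1]; // not a tail call, so frames must stay live
}

int main() {
    std::printf("%d\n", rec(0));
    return 0;
}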
Is the problem an issue of exhausting stack space with the recursion, or is it something else?