//finding closest element
// Returns the minimum absolute difference between x and any element of the
// sorted vector s1. Returns INT_MAX when s1 is empty (the "no candidate"
// sentinel the callers already use).
int closest(const vector<int> &s1, int x)
{
    // Guard: on an empty vector lower_bound returns begin() == end(), and the
    // original code's --j on that iterator was undefined behavior.
    if (s1.empty())
        return INT_MAX;
    int dif = INT_MAX;
    // First element >= x, or end() if every element is < x.
    auto j = lower_bound(s1.begin(), s1.end(), x);
    if (j != s1.end())
        dif = min(dif, abs(*j - x));        // closest candidate at or above x
    if (j != s1.begin())
        dif = min(dif, abs(*prev(j) - x));  // largest element strictly below x
    return dif;
}
// Iterative DFS over the component containing node i.
// Marks every reachable node in vis, and shrinks dif1/dif2 to the minimum
// distance from any reachable node index to the sorted lists s1 and s2.
void dfs(vector<vector<int>> &G, vector<int> &vis, int i, vector<int> &s1 , vector<int> &s2, int& dif1, int &dif2)
{
    // Visit a node: flag it and fold its distances into the running minima.
    auto record = [&](int node) {
        vis[node] = 1;
        dif1 = min(dif1, closest(s1, node));
        dif2 = min(dif2, closest(s2, node));
    };

    record(i);
    stack<int> pending;
    pending.push(i);

    while (!pending.empty())
    {
        int node = pending.top();
        pending.pop();
        for (int nb : G[node])
        {
            if (vis[nb] == 0)
            {
                record(nb);
                pending.push(nb);
            }
        }
    }
}
int main()
{
// some code omitted
// NOTE(review): the omitted code must define n, G, vis, s1, s2, ans —
// presumably n = node count, G = adjacency list, vis = zero-initialized
// visited flags, s1/s2 = sorted value vectors, ans = running minimum
// (needs a large initial value). Confirm against the full file.
for (int i = 0; i < n; i++)
{
// Each unvisited i starts a new connected component.
if(vis[i] == 0)
{
// Best distance from any node of this component to s1 and s2.
int dif1 = INT_MAX;
int dif2 = INT_MAX;
dfs(G, vis, i, s1, s2, dif1, dif2);
// The 1LL factor promotes the squaring to 64-bit, avoiding int overflow
// (dif1/dif2 can be as large as INT_MAX for an unreachable list).
ans = min(ans, dif1*1LL*dif1+dif2*1LL*dif2);
}
}
}
This was a solution to a graph-theory problem related to connectivity.
Initially, s1 and s2 were sets, obtained from a different part of the code (not relevant here). The program was very slow in the loop inside main, as I observed using a debugger.
On a whim, I changed the sets to vectors and sorted them after they were obtained. Miraculously, the program got about 10 times faster. The lower_bound function has the same asymptotic time complexity for a set as for a sorted vector. I don't understand why this happened. The worst-case time complexity is O(V log V + E) in both cases, right? Is it because of some optimization made by the compiler, or am I missing something?