This piece of code, which demonstrates algorithm complexity and measuring the run time of an algorithm, comes from the book FUNDAMENTALS OF PYTHON: FROM FIRST PROGRAMS THROUGH DATA STRUCTURES (snippet_1):
"""
File: timing1.py
Prints the running times for problem sizes that double,
using a single loop.
"""
import time
problemSize = 10000000
print "%12s%16s" % ("Problem Size", "Seconds")
for count in xrange(5):
start = time.time()
# The start of the algorithm
work = 1
for x in xrange(problemSize):
work += 1
work -= 1
# The end of the algorithm
elapsed = time.time() - start
print "%12d%16.3f" % (problemSize, elapsed)
problemSize *= 2
This code works well (under Python 2), and I am trying a similar experiment in C++. Here is my code (snippet_2):
#include <iostream>
#include <chrono>

using namespace std;
using namespace std::chrono;

void functiona()
{
    long long number = 0;
    long long problemSize = 100000000000;
    for (long long i = 0; i < problemSize; ++i)
    {
        for (long long j = 0; j < problemSize; ++j)
        {
            for (long long k = 0; k < problemSize; ++k)
            {
                for (long long l = 0; l < problemSize; ++l)
                {
                    for (long long m = 0; m < problemSize; ++m)
                    {
                        number++;
                        number--;
                    }
                }
            }
        }
    }
}

int main()
{
    high_resolution_clock::time_point t1 = high_resolution_clock::now();
    functiona();
    high_resolution_clock::time_point t2 = high_resolution_clock::now();
    auto duration = duration_cast<microseconds>(t2 - t1).count();
    cout << duration << endl;
    return 0;
}
I would think problemSize is more than large enough (five nested loops of 10^11 iterations each), yet snippet_2 outputs 0. What am I missing?
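For reference, what I eventually want is a direct C++ port of snippet_1's doubling loop, rather than the deeply nested version above. This is a rough sketch of the structure I have in mind (variable names and output formatting are my own, not from the book):

#include <iostream>
#include <chrono>

using namespace std;
using namespace std::chrono;

int main()
{
    long long problemSize = 10000000;
    cout << "Problem Size        Seconds" << endl;
    for (int count = 0; count < 5; ++count)
    {
        high_resolution_clock::time_point t1 = high_resolution_clock::now();
        // The start of the algorithm
        long long work = 1;
        for (long long x = 0; x < problemSize; ++x)
        {
            work += 1;
            work -= 1;
        }
        // The end of the algorithm
        high_resolution_clock::time_point t2 = high_resolution_clock::now();
        auto elapsed = duration_cast<microseconds>(t2 - t1).count();
        // Convert microseconds to seconds to match snippet_1's output
        cout << problemSize << "    " << elapsed / 1e6 << endl;
        problemSize *= 2;
    }
    return 0;
}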