After asking this question I was confused enough that I decided to build a similar test for a compiled C program. This is my code:
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <stdlib.h>

#define SUMMATIONS 20000000

int main() {
    static int speedups[2101] = { 0 };
    srand((unsigned)time(NULL));

    while (1) {
        clock_t t1, t2, t3, t4;
        signed int tmp, i, n1, n2;

        // Slow version: multiply by 3 inside the loop
        t1 = clock();
        for (n1 = rand() % 50, i = 0; i < SUMMATIONS; i++) {
            n1 += 3 * i * i;
        }
        t2 = clock();

        // Optimized version: multiply by 3 once, after the loop
        t3 = clock();
        for (n2 = rand() % 50, i = 0; i < SUMMATIONS; i++) {
            n2 += i * i;
        }
        n2 *= 3;
        t4 = clock();

        // Gather speedup statistics: clamp the relative speedup to [-100, 100]
        // and encode it as an index (1000 + x for speedups, 2000 + x for slowdowns)
        if ((int)(t2 - t1) != 0) {
            tmp = (int)(100.0f * ((float)(t2 - t1) - (float)(t4 - t3)) / (float)(t2 - t1));
            tmp = tmp < -100 ? -100 : tmp > 100 ? 100 : tmp;
            tmp = (tmp >= 0 ? 1000 : 2000) + abs(tmp);
            speedups[tmp]++;
        }

        // Output the accumulated histogram: '+' rows are speedups, '-' rows are slowdowns
        for (i = 0; i < 2101; i++) {
            if (speedups[i] != 0) {
                char s = i / 1000 == 1 ? '+' : i / 1000 == 2 ? '-' : '?';
                printf("%c%i : %i\n", s, i % 1000, speedups[i]);
            }
        }
        printf("error %i ******************\n", abs(n2 - n1));
    }
    return 0;
}
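Reading the output: each printed line is one bucket of the running histogram. Output like the following (the numbers are illustrative, not measured) would mean that 42 benchmark rounds so far landed in the "17% faster" bucket and 5 rounds in the "3% slower" bucket:

    +17 : 42
    -3 : 5
    error 94 ******************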
Compiled under GCC with options -O3 -march=native
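For reference, the build and a quick way to check what the optimizer actually did might look like this (the file name speedup_test.c is just a placeholder, not from the original post):

    gcc -O3 -march=native -o speedup_test speedup_test.c
    gcc -O3 -march=native -S speedup_test.c    # emits speedup_test.s, to see whether both loops were transformed the same way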
EDIT
The test code was changed so that the error value can only be known at run time (not at compile time), so that the GCC optimizer cannot simply delete the loops' code.
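To spell out why the error value is a run-time quantity: write r1 and r2 for the two rand() % 50 draws and S for the sum of i*i over the loop range. In practice (with 32-bit wrap-around arithmetic) the two loops compute

    n1 = r1 + 3*S
    n2 = 3*(r2 + S)
    n2 - n1 = 3*r2 - r1

so the printed error depends only on the two random starting values (at most 3*49 = 147 in magnitude) and cannot be replaced by a compile-time constant.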
Results
When run, the program keeps updating the counters of how many benchmark rounds hit each speedup value and prints the counter table. Plotting the number of hits against the speedup value gives this graph:

So the GCC-compiled program shows a speedup of roughly 20% on average.
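To make that figure concrete (the timings below are illustrative, not measured values): if one round of the plain loop takes 25 ms and the factored loop takes 20 ms, the code computes

    tmp = 100 * (25 - 20) / 25 = +20

which increments speedups[1020] and shows up in the table as a "+20" row.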
Question
Should we expect a speedup on the CPU, as the GCC-compiled program predicts?