I am using this crude monte-carlo integration technique to find out the value of $\pi$ and noticed that the integration value steadily deviates from the actual value as I increase the number of sample points. The code is:
#include<iostream>
#include<cmath>
#include<cstdlib>
using namespace std;
float f(float x)//definition of the integrand
{
return sqrt(1-x*x);
}
float rand1()//random number generator between 0 and 1
{
float s=rand();
return s/(RAND_MAX+1.0);
}
float calcint(float xi,float xf,float yi,float yf,float N)//integrator
{
float n=0;
for(int i=0;i<N;i++)
{
float x=(xf-xi)*rand1();float y=(yf-yi)*rand1();
if (y<f(x))
{
n=n+1;
}
}
return n/N*(xf-xi)*(yf-yi);
}
int main()
{
float N=100000000;
for (int i=1; i<N; i=i+N/10)//lists integration value for different sampling
{
cout<<i<<"\t"<<4*calcint(0,1,0,1,i)<<endl;
}
return 0;
}
The output is,
10000000 3.14188
20000000 3.14059
30000000 2.23696
40000000 1.67772
50000000 1.34218
60000000 1.11848
70000000 0.958698
80000000 0.838861
90000000 0.745654
Why does it happen? Are Monte-carlo integration techniques guaranteed to converge with larger number of sample points?