Suppose we have an app running multiple network clients, each of which runs in a loop: send a request, pause for a specified time, then repeat.
Example (not all of the code is shown):
#define WAIT_INTERVAL 1000
class client()
{
public:
explicit client(boost::asio::io_service & io_service)
: io_service_(io_service)
{
working_ = true;
};
run(boost::asio::yield_context t_yield)
{
while (working_) {
// async wait
boost::asio::deadline_timer timer(io_service_, boost::posix::milliseconds(WAIT_INTERVAL));
timer.async_wait(t_yield);
do_work();
}
};
private:
void do_work()
{
// make work
}
bool working_{ false };
boost::asio::io_service & io_service_;
}
class app()
{
public:
// code omitted
prepare(size_t number_of_clients)
{
clients_.reserve(number_of_clients);
for (size_t i = 0; i < number_of_clients; ++i)
{
clients_.emplace_back(io_service_);
}
}
run()
{
for (auto & client : clients_)
{
boost::asio::spawn(io_service_, [&client](boost::asio::yield_context t_yield)
{
client.run(t_yield);
});
}
}
private:
boost::asio::io_service io_service_;
std::vector<client> clients_;
}
The question is: when running many clients (around 100 or so), the deadline_timer
actually waits several seconds — not close to 1000 ms, but about 9000 ms or even more.
How can this be avoided, or how should the program be rewritten, so that each client waits for the given interval plus or minus some small difference?