I read some thread pool code on GitHub.
Here is a good one, which I revised slightly.
#pragma once
#include <vector>
#include <queue>
#include <memory>
#include <thread>
#include <mutex>
#include <condition_variable>
#include <future>
#include <functional>
#include <stdexcept>
class ThreadPool {
public:
ThreadPool(size_t);
template<class F, class... Args>
auto enqueue(F&& f, Args&&... args) -> std::future<typename std::result_of<F(Args...)>::type>; // the trailing return type makes enqueue return a std::future for the task's result
~ThreadPool();
private:
// need to keep track of threads so we can join them
std::vector< std::thread > workers;
// the task queue
std::queue< std::function<void()> > tasks;
// synchronization
std::mutex queue_mutex;
std::condition_variable condition;
bool stop;
};
// the constructor just launches the requested number of worker threads
inline ThreadPool::ThreadPool(size_t threads) : stop(false) {
for(size_t i = 0;i<threads;++i)
workers.emplace_back( // emplace_back constructs a std::thread in place; the lambda below is the worker loop
[this] {
for(;;) {
std::function<void()> task;
{
std::unique_lock<std::mutex> lock(this->queue_mutex);
this->condition.wait(lock, [this] { return this->stop || !this->tasks.empty(); });
// wait until stop is requested or there is a task to run
if(this->stop && this->tasks.empty()) return; // exit only when stopping AND the queue is empty
// if stop is set but tasks remain, the leftover tasks are still handled
task = std::move(this->tasks.front()); // get the first task
this->tasks.pop();
}
task();
}
});
}
// add new work item to the pool
template<class F, class... Args>
auto ThreadPool::enqueue(F&& f, Args&&... args) -> std::future<typename std::result_of<F(Args...)>::type> {
using return_type = typename std::result_of<F(Args...)>::type;
auto task = std::make_shared< std::packaged_task<return_type()> >(std::bind(std::forward<F>(f), std::forward<Args>(args)...));
std::future<return_type> res = task->get_future();
{
std::unique_lock<std::mutex> lock(queue_mutex);
// don't allow enqueueing after stopping the pool
if(stop) throw std::runtime_error("enqueue on stopped ThreadPool");
tasks.emplace([task](){ (*task)(); });
}
condition.notify_one();
return res;
}
// the destructor joins all threads
inline ThreadPool::~ThreadPool() {
{
std::unique_lock<std::mutex> lock(queue_mutex);
stop = true;
}
condition.notify_all();
for(std::thread &worker: workers) worker.join();
}
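For reference, this is how I call it. A minimal sketch, assuming the header above is saved as thread_pool.hpp (the same name I include in my test below):
#include "./thread_pool.hpp"
#include <cstdio>
int main() {
    ThreadPool pool(4); // 4 worker threads
    auto fut = pool.enqueue([](int a, int b) { return a + b; }, 2, 3);
    printf("sum = %d\n", fut.get()); // get() blocks until the task has run
    return 0;
} // ~ThreadPool sets stop, wakes the workers and joins them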
I use it to speed up my code. For example, I have 1000 independent tasks, each taking about 1 s.
I found that with this thread pool it is slower than a plain single-threaded loop.
So I wrote a small program to check.
Here is my test code.
#include "./time_util.h" // this is a timer class, just use to get time
#include "./thread_pool.hpp" // this is the class Threadpool header
size_t i = 0;
void test() {
// printf("hah\n");
i++;
}
#define N 1000000
void fun1() {
printf("func1\n");
ThreadPool pool(10);
for (int i = 0; i < N; ++i) {
pool.enqueue(test);
}
} // the pool is destroyed here, so fun1 does not return until all tasks have run
void fun2() {
printf("func2\n");
for (int i = 0; i < N; ++i) {
test();
}
}
int main(int argc, char** argv) {
util::time_util::Timer t;
t.StartTimer();
if (argc == 1) {
fun1();
} else {
fun2();
}
printf("i=%d\n", i);
t.EndTimer("1");
}
The plain loop version takes about 0.005 s, while the thread pool version takes about 5 s.
My machine has a 2.2 GHz CPU with 24 cores (48 hardware threads), running CentOS 7.
Also, the counter printed by the thread pool version is incorrect; that is expected, since I don't add any lock.
Can you explain this? Does it mean that for very small tasks a thread pool cannot give any speedup?
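(Side note: I know the race on the counter could be removed, for example with std::atomic as in the rough sketch below; my question is about the timing, not the race.)
#include <atomic>
#include <cstddef>
std::atomic<std::size_t> i{0};
void test() {
    // relaxed ordering is enough here: only the final count matters
    i.fetch_add(1, std::memory_order_relaxed);
}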