tp.schedule(&first_task);     tp.schedule(&second_task);      tp.schedule(boost::bind(task_with_parameter, 4));     // Wait until all tasks are finished.     tp.wait();     // Now all tasks are finished!         return(0);

1.3 任务返回值的获取:

一般异步调用中,返回值的获取有同步获取和异步获取两种形式。

同步获取返回值:

// Task function with a return value: prints a trace message and
// always returns 23 (used below to demonstrate future-based result
// retrieval from the thread pool).
int task_int_23()
{
    std::cout << "task_int_23()\n";
    return 23;
}
// Submit the task to the pool; schedule() returns a boost future
// that will hold the task's result once it has run.
future<int> res = schedule(tp, &task_int_23);
// Block the calling thread until the task has finished.
res.wait();
// get() retrieves the stored value (23 here) — synchronous retrieval.
cout<<"get res value:"<<res.get()<<endl;

1.4 异步获取返回值:

不知道是设计者就不打算使用异步回调获取返回值还是我看的不够仔细,异步获取返回值的方式还真没有找着,只好自己简单的写了一个回调的仿函数来实现异步返回值的获取。

//R为任务函数的返回值类型
template<class R>
class callback_task
    typedef boost::function<void (R)> callback;
    typedef boost::function<R ()> function;
private:
    callback c_;
    function f_;
public:
    //F: 任务执行函数 C:结果回调函数
    template<class F,class C>
    callback_task(F f,C c)
        f_ = f;
        c_ = c;
    void operator()()
        c_(f_());

通过这个对象可以很容易的实现异步结果的回调。

// Result callback for task_int_23: receives the task's return value.
void callback(int k)
{
    std::cout << "get callback value:" << k << std::endl;
}
// Retrieve the task's return value asynchronously via the callback wrapper.
tp.schedule(callback_task<int>(&task_int_23, &callback));

2 boost::thread_group 以及io_service

2.1 例子1

#include <boost/shared_ptr.hpp>
#include <boost/make_shared.hpp>
#include <boost/thread.hpp>
#include <boost/bind.hpp>
#include <boost/asio.hpp>
#include <boost/move/move.hpp>
#include <iostream>
#include <unistd.h>
namespace asio = boost::asio; 
// Sleeps for `seconds` seconds, logging before and after.
// Returns 0 so it can serve as a packaged_task<int> payload below.
int sleep_print(int seconds) {
	std::cout << "going to sleep (" << seconds << ")" << std::endl;
	sleep(seconds);
	std::cout << "wake up (" << seconds << ")" << std::endl;
	return 0;
}
typedef boost::packaged_task<int> task_t;
typedef boost::shared_ptr<task_t> ptask_t;

// Wraps sleep_print(seconds) in a packaged_task, posts it to the
// io_service so a worker thread will execute it, and records the
// task's shared_future in pending_data for the caller to wait on.
void push_job(int seconds, boost::asio::io_service& io_service, std::vector<boost::shared_future<int> >& pending_data) {
	ptask_t task = boost::make_shared<task_t>(boost::bind(&sleep_print, seconds));
	boost::shared_future<int> fut(task->get_future());
	pending_data.push_back(fut);
	// Post the task's call operator; the shared_ptr keeps the task
	// alive until a worker thread has run it.
	io_service.post(boost::bind(&task_t::operator(), task));
}
int main() {
	boost::asio::io_service io_service;
	boost::thread_group threads;
	// The work object keeps io_service::run() from returning while the
	// queue is momentarily empty.
	boost::asio::io_service::work work(io_service);
	// One worker thread per hardware core, all running the event loop.
	for (unsigned i = 0; i < boost::thread::hardware_concurrency(); ++i)
		threads.create_thread(boost::bind(&boost::asio::io_service::run,
			&io_service));
	std::vector<boost::shared_future<int> > pending_data; // vector of futures
	sleep_print(2);
	push_job(3, io_service, pending_data);
	push_job(4, io_service, pending_data);
//	boost::thread task(boost::move(pt)); // launch task on a thread
	// Block until both posted jobs have completed.
	boost::wait_for_all(pending_data.begin(), pending_data.end());
	push_job(3, io_service, pending_data);
	push_job(4, io_service, pending_data);
	push_job(5, io_service, pending_data);
	boost::wait_for_all(pending_data.begin(), pending_data.end());
	return 0;
}

2.2 例子2

2.2.1 Work Queue

template <int NWorkers = 0>
class work_queue
public:
   work_queue()
      work_ctrl_ = new boost::asio::io_service::work(io_service_);
      int workers = boost::thread::hardware_concurrency();
      if(NWorkers > 0)
         workers = NWorkers;
      for (std::size_t i = 0; i < workers; ++i)
         threads_.create_thread(boost::bind(&asio::io_service::run, &io_service_));
   virtual ~work_queue()
      delete work_ctrl_;
   template <typename TTask>
   void add_task(TTask task)
      // c++11
      // io_service_.dispatch(std::move(task));
      io_service_.dispatch(task);
private:
   boost::asio::io_service io_service_;
   boost::thread_group threads_;
   boost::asio::io_service::work *work_ctrl_;

2.2.2 app

// application class
// Queues three gaussian_blur tasks on the inherited work_queue and
// collects their 2-D kernels; collected results are printed by stop().
class myapp : work_queue<0>
{
public:
   // Callback invoked by each task with its finished kernel.
   // Guarded by mutex_ because tasks complete on worker threads.
   void add_result(vector< vector<double> > kernel2d)
   {
      boost::lock_guard<boost::mutex> lock(mutex_);
      task_count_++;
      result_.push_back(kernel2d);
      if(task_count_== 3)
         cout << "all tasks are completed, waiting ctrl-c to display the results..." << endl;
   }
   // Application entry point: queue the tasks, then block until a
   // termination request (e.g. ctrl-c) arrives.
   int operator()(const std::vector< application::application_ctrl::string_type >& args,
                  application::application_ctrl& ctrl)
   {
      // your application logic here!
      task_count_ = 0;
      // our tasks
      add_task(gaussian_blur<3>( boost::bind<void>( &myapp::add_result, this, _1 )));
      add_task(gaussian_blur<6>( boost::bind<void>( &myapp::add_result, this, _1 )));
      add_task(gaussian_blur<9>( boost::bind<void>( &myapp::add_result, this, _1 )));
      ctrl.wait_for_termination_request();
      return 0;
   }
   // Called at shutdown: print every collected kernel row by row.
   int stop()
   {
      std::cout << "Result..." << std::endl;
      for(std::size_t i = 0; i < result_.size(); ++i)
      {
         cout << i << " : -----------------------" << std::endl;
         vector< vector<double> > & kernel2d = result_[i];
         for (std::size_t row = 0; row < kernel2d.size(); row++)
         {
            for (std::size_t col = 0; col < kernel2d[row].size(); col++)
               cout << setprecision(5) << fixed << kernel2d[row][col] << " ";
            cout << endl;
         }
      }
      return 1;
   }
private:
   boost::mutex mutex_;
   vector< vector< vector<double> > > result_;
   int task_count_;
}; // myapp

3 基于c++11 实现的线程池

主要步骤如下:

设定线程池中所提供的服务线程数

int threads = thread::hardware_concurrency();

每个线程都应该执行一个无限循环,无限循环中等待新任务到达,并执行任务

// Launch the workers: each thread runs the infinite loop that waits
// for queued tasks and executes them.
vector<thread> pool;
for (int i = 0; i < threads; i++)
   pool.push_back(thread(Infinite_loop_function));

无限循环function

// Worker loop: wait until the shared queue is non-empty, pop one task
// while holding the queue lock, then run the task with the lock
// released so other workers can make progress.
while(true)
{
    {
        unique_lock<mutex> lock(queue_mutex);
        condition.wait(lock, []{ return !Queue.empty(); });
        Task = Queue.front();
        Queue.pop();
    }
    // Execute outside the critical section.
    Task();
}

向任务队列中添加任务     具体实现例子

// Push a task onto the shared queue and wake one waiting worker.
void enqueue(function<void()> new_task)
{
    unique_lock<mutex> lock(queue_mutex);
    Queue.push(new_task);
    condition.notify_one();
}
// Fixed-size thread pool: `threads` workers consume std::function<void()>
// tasks from a mutex-protected queue until the pool is destroyed.
// Destruction drains the queue before joining the workers.
class ThreadPool {
public:
    // Spawn the worker threads; each loops until stop is set AND the
    // queue has been drained.
    ThreadPool(size_t threads) : stop(false)
    {
        for(size_t i = 0; i < threads; ++i)
            workers.emplace_back(
                [this]
                {
                    for(;;)
                    {
                        std::function<void()> task;
                        {
                            std::unique_lock<std::mutex> lock(this->queue_mutex);
                            this->condition.wait(lock,
                                [this]{ return this->stop || !this->tasks.empty(); });
                            // Exit only after every queued task has run.
                            if(this->stop && this->tasks.empty())
                                return;
                            task = std::move(this->tasks.front());
                            this->tasks.pop();
                        }
                        // Run outside the lock so workers execute concurrently.
                        task();
                    }
                });
    }
    // add new work item to the pool
    void enqueue(std::function<void()>& task)
    {
        {
            std::unique_lock<std::mutex> lock(queue_mutex);
            // don't allow enqueueing after stopping the pool
            if(stop)
                throw std::runtime_error("enqueue on stopped ThreadPool");
            tasks.emplace(task);
        }
        condition.notify_one();
    }
    // Signal shutdown, then join every worker.
    ~ThreadPool()
    {
        {
            // Scope the lock: holding it across join() would deadlock
            // with workers trying to lock the queue.
            std::unique_lock<std::mutex> lock(queue_mutex);
            stop = true;
        }
        condition.notify_all();
        for(std::thread &worker: workers)
            worker.join();
    }
private:
    std::vector< std::thread > workers;
    // the task queue
    std::queue< std::function<void()> > tasks;
    // synchronization
    std::mutex queue_mutex;
    std::condition_variable condition;
    bool stop;
};

https://www.codeproject.com/Articles/664709/Creating-a-Work-Queue-Thread-Pool-Application-Usin

https://ce39906.github.io/2018/03/29/C-Thread-Pool-%E4%BD%BF%E7%94%A8%E8%A7%A3%E6%9E%90/

https://www.jianshu.com/p/f7b7083738c3

目录运行时数据区内部结构 运行时数据区 内存是非常重要的系统资源,是硬盘和CPU的中间仓库及桥梁,承载着操作系统和应用程序的实时运行。JVM内存布局规定了Java在运行过程中内存申请、分配、管理的策略,保证了JVM的高效稳定运行。不同的JVM对于内存的划分方式和管理机制存在着部分差异。结合JVM虚拟机规范,来探讨一下经典的JVM内存布局。 Java虚拟机定义了若干种程序运行期间会使用到的运行时数据区,其中有一些会随着虚拟机启动而创建,随着虚拟机退出而销毁。另一些则是与线程一一对应的,这些与线程对
Boost thread库中目前并没有提供线程池,我在sourceforge上找了一个用boost编写的线程池。该线程池与boost结合得比较好,并且提供了多种任务执行策略,使用也非常简单。 下载地址: http://threadpool.sourceforge.net/ 使用threadpool: 这个线程池不需要编译,只要在项目中包含其头文件就可以了。 一个简单的例子:
回顾学习find和find_if, 网上查了一下资料,这里记录一下。 STL的find、find_if函数提供了一种对数组、STL容器进行查找的方法。使用该函数,需 #include <algorithm> 我们查找一个list中的数据,通常用find(),例如: 1、find using namespace std; int main()
这个问题看起来是一个编译器的警告。RTTI(Run-Time Type Information)是一种C++的特性,它允许程序在运行时获取对象的类型信息。这个警告的意思是,在编译时无法找到某个类的RTTI符号,可能会导致程序在运行时出现问题。 通常情况下,这个警告可以通过在编译选项中添加-fno-rtti来忽略掉。不过,这样做会导致某些类型安全的代码无法正常工作,因此需要谨慎使用。另外,也可以通过在类定义中添加虚函数来解决这个问题,因为虚函数会自动添加RTTI符号。 如果你需要更具体的帮助,可以提供更多的上下文信息,我会尽力帮你解决问题。