view src/thread_pool_test/src/main.rs @ 11:34aba7ec9efc

add thread pool
author Shinji KONO <kono@ie.u-ryukyu.ac.jp>
date Mon, 18 Jan 2021 15:53:03 +0900

use threadpool::ThreadPool;
use std::sync::mpsc::channel;

fn main1() {
    use std::sync::{Arc, Barrier};
    use std::sync::atomic::{AtomicUsize, Ordering};

    // create at least as many workers as jobs, or the final barrier.wait() will deadlock
    let n_workers = 42;
    let n_jobs = 23;
    let pool = ThreadPool::new(n_workers);
    let an_atomic = Arc::new(AtomicUsize::new(0));

    assert!(n_jobs <= n_workers, "too many jobs, will deadlock");

    // create a barrier that waits for all jobs plus the starter thread
    let barrier = Arc::new(Barrier::new(n_jobs + 1));
    for _ in 0..n_jobs {
        let barrier = barrier.clone();
        let an_atomic = an_atomic.clone();

        pool.execute(move || {
            // do the heavy work
            an_atomic.fetch_add(1, Ordering::Relaxed);

            // then wait for the other threads
            barrier.wait();
        });
    }

    // wait for the threads to finish the work
    barrier.wait();
    assert_eq!(an_atomic.load(Ordering::SeqCst), /* n_jobs = */ 23);
    println!("barrier done");
}
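
// A minimal alternative sketch (assumption, for comparison with main1 above):
// the threadpool crate's ThreadPool::join() blocks the caller until every
// queued job has finished, so a Barrier is not strictly needed just to wait
// for completion. The worker/job counts here are arbitrary example values.
#[allow(dead_code)]
fn main2() {
    use std::sync::Arc;
    use std::sync::atomic::{AtomicUsize, Ordering};

    let pool = ThreadPool::new(4);
    let counter = Arc::new(AtomicUsize::new(0));

    for _ in 0..8 {
        let counter = counter.clone();
        pool.execute(move || {
            // do the heavy work
            counter.fetch_add(1, Ordering::Relaxed);
        });
    }

    // block until all 8 jobs have run
    pool.join();
    assert_eq!(counter.load(Ordering::SeqCst), 8);
    println!("join done");
}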
fn main() {
    let n_workers = 4;
    let n_jobs = 8;
    let pool = ThreadPool::new(n_workers);

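    // each job sends a single 1 back over the channel; the main thread then
    // consumes exactly n_jobs messages, so no send is left without a receiver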
    let (tx, rx) = channel();
    for _ in 0..n_jobs {
        let tx = tx.clone();
        pool.execute(move || {
            tx.send(1).expect("channel will be there waiting for the pool");
        });
    }

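    // take exactly n_jobs results and sum them: 8 jobs, each sending 1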
    assert_eq!(rx.iter().take(n_jobs).fold(0, |a, b| a + b), 8);
    println!("pool done");
    main1();
}
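
// Build note (assumption): this file relies on the external `threadpool` crate,
// so a Cargo.toml dependency entry along the lines of `threadpool = "1.8"` is assumed.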