Fearless concurrency, threads, message passing with channels, and shared state with Mutex.
Rust's threading model is built on the ownership system. Threads are lightweight and safe to spawn because the ownership rules, together with the Send and Sync marker traits, let the compiler catch data races at compile time.
use std::thread;
use std::time::Duration;
fn main() {
    // Spawn a worker that prints four messages, pausing briefly between them.
    let worker = thread::spawn(|| {
        (1..5).for_each(|n| {
            println!("Thread says: {}", n);
            thread::sleep(Duration::from_millis(100));
        });
    });

    // Meanwhile the main thread produces its own output.
    for n in 1..3 {
        println!("Main says: {}", n);
        thread::sleep(Duration::from_millis(150));
    }

    // Block until the worker finishes; unwrap propagates a worker panic.
    worker.join().unwrap();
}
use std::thread;
fn main() {
    let v = vec![1, 2, 3];
    // `move` transfers ownership of `v` into the closure, so the spawned
    // thread owns the vector for as long as it runs.
    let worker = thread::spawn(move || {
        println!("Vector in thread: {:?}", v);
    });
    // `v` is gone from this scope now; this line would not compile:
    // println!("{:?}", v);
    worker.join().unwrap();
}
Use move before the closure to transfer ownership of captured variables into the thread. Without it, the compiler will reject the code if the spawned thread might outlive the data it references.
use std::thread;
fn main() {
    let data = vec![1, 2, 3, 4, 5];

    // One thread per two-element chunk; each worker owns its own copy.
    let handles: Vec<_> = data
        .chunks(2)
        .map(|chunk| {
            let owned = chunk.to_vec();
            thread::spawn(move || {
                let sum: i32 = owned.iter().sum();
                println!("Chunk sum: {}", sum);
                sum
            })
        })
        .collect();

    // Join every worker, reporting whether it finished or panicked.
    for handle in handles {
        if handle.join().is_ok() {
            println!("Thread completed successfully");
        } else {
            println!("Thread panicked");
        }
    }
}
Channels provide safe communication between threads without sharing memory. The mpsc (multi-producer, single-consumer) pattern is the most common approach. The sender transfers ownership of each value to the receiver.
use std::sync::mpsc;
use std::thread;
fn main() {
    // A channel has a sending half (tx) and a receiving half (rx).
    let (tx, rx) = mpsc::channel();

    thread::spawn(move || {
        let val = String::from("hello from thread");
        // `send` takes ownership of `val`, so the value cannot be reused here.
        tx.send(val).unwrap();
    });

    // `recv` blocks the current thread until a value arrives.
    let received = rx.recv().unwrap();
    println!("Got: {}", received);
}
use std::sync::mpsc;
use std::thread;
use std::time::Duration;
fn main() {
    let (tx, rx) = mpsc::channel();

    thread::spawn(move || {
        let words = vec![
            String::from("hi"),
            String::from("from"),
            String::from("the"),
            String::from("thread"),
        ];
        for word in words {
            tx.send(word).unwrap();
            thread::sleep(Duration::from_secs(1));
        }
    });

    // Treating the receiver as an iterator blocks on each message and
    // ends once every sender has been dropped.
    for received in rx {
        println!("Got: {}", received);
    }
}
use std::sync::mpsc;
use std::thread;
fn main() {
    let (tx, rx) = mpsc::channel();

    // Cloning the sender gives each producer thread its own handle.
    let tx1 = tx.clone();
    let tx2 = tx.clone();

    thread::spawn(move || {
        tx1.send("msg from thread 1").unwrap();
    });
    thread::spawn(move || {
        tx2.send("msg from thread 2").unwrap();
    });

    // Drop the original sender so the receive loop below can terminate
    // once tx1 and tx2 go out of scope in their threads.
    drop(tx);

    for received in rx {
        println!("{}", received);
    }
}
send() takes ownership of the value, so the sending thread can never accidentally use it afterward. This is how Rust prevents data races through its type system rather than runtime checks.
Mutex (mutual exclusion) protects shared data with a lock. Combined with Arc (atomic reference counting) for thread-safe shared ownership, it enables safe mutable state across threads.
use std::sync::Mutex;
fn main() {
    let counter = Mutex::new(0);
    {
        // `lock` returns a guard that dereferences to the protected value.
        let mut guard = counter.lock().unwrap();
        *guard += 1;
    } // Guard dropped here, releasing the lock.
    println!("Result: {}", *counter.lock().unwrap());
}
use std::sync::{Arc, Mutex};
use std::thread;
fn main() {
    let counter = Arc::new(Mutex::new(0));

    // Each thread receives its own Arc handle to the shared counter.
    let handles: Vec<_> = (0..10)
        .map(|_| {
            let counter = Arc::clone(&counter);
            thread::spawn(move || {
                *counter.lock().unwrap() += 1;
            })
        })
        .collect();

    for handle in handles {
        handle.join().unwrap();
    }

    println!("Result: {}", *counter.lock().unwrap()); // 10
}
use std::sync::{Arc, Mutex};
use std::thread;
// Practical example: thread-safe bank account
struct BankAccount {
    // Balance wrapped in Arc<Mutex<..>> so the account can be shared
    // across threads and mutated under a lock.
    balance: Arc<Mutex<f64>>,
}

impl BankAccount {
    /// Creates an account holding `initial`.
    fn new(initial: f64) -> Self {
        BankAccount {
            balance: Arc::new(Mutex::new(initial)),
        }
    }

    /// Adds `amount` to the balance.
    fn deposit(&self, amount: f64) {
        let mut balance = self.balance.lock().unwrap();
        *balance += amount;
    }

    /// Withdraws `amount` if funds suffice; returns whether it succeeded.
    /// The check and the subtraction happen under one lock, so the
    /// balance can never go negative even with concurrent callers.
    fn withdraw(&self, amount: f64) -> bool {
        let mut balance = self.balance.lock().unwrap();
        if *balance >= amount {
            *balance -= amount;
            true
        } else {
            false
        }
    }
}
fn main() {
    let account = Arc::new(BankAccount::new(100.0));
    let mut workers = Vec::new();

    // Five threads deposit concurrently into the same shared account.
    for id in 0..5 {
        let account = Arc::clone(&account);
        workers.push(thread::spawn(move || {
            account.deposit(10.0);
            println!("Thread {} deposited 10", id);
        }));
    }

    for worker in workers {
        worker.join().unwrap();
    }
}
The lock is automatically released when the MutexGuard goes out of scope. Keep your critical sections as short as possible to minimise contention between threads.
Send and Sync are marker traits that indicate thread-safe types. Send means ownership can be transferred between threads. Sync means references can be shared safely between threads.
use std::cell::RefCell;
use std::rc::Rc;
// Rc<T> is not Send (not thread-safe)
// RefCell<T> is not Sync (interior mutability without locking)
// Compile-time check: only compiles when T implements Send
// (ownership may be transferred across threads).
fn is_send<T: Send>() {}
// Compile-time check: only compiles when T implements Sync
// (&T may be shared across threads).
fn is_sync<T: Sync>() {}
fn main() {
    // Both calls compile because the marker-trait bounds are satisfied.
    is_sync::<i32>();
    is_send::<String>();
    // This would NOT compile:
    // is_send::<Rc<i32>>();
}
use std::sync::{Arc, Mutex};
use std::thread;
// Custom struct that is Send + Sync
#[derive(Clone)]
struct Counter {
    // Arc<Mutex<i32>> is Send + Sync, and a struct whose fields are all
    // Send + Sync is automatically Send + Sync itself.
    value: Arc<Mutex<i32>>,
}
fn main() {
    let counter = Counter {
        value: Arc::new(Mutex::new(0)),
    };

    // Clone the Counter for each thread; the clones all share one value
    // through the Arc inside.
    let handles: Vec<_> = (0..5)
        .map(|_| {
            let counter = counter.clone();
            thread::spawn(move || {
                *counter.value.lock().unwrap() += 1;
            })
        })
        .collect();

    for handle in handles {
        handle.join().unwrap();
    }
}
If you try to send an Rc<T> or RefCell<T> across thread boundaries, the compiler will stop you. This is Rust's "fearless concurrency": data races are prevented at compile time, not discovered at runtime.
Deadlocks occur when threads hold locks and wait for each other. Best practices include keeping critical sections small and always acquiring locks in a consistent order.
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::Duration;
// DEADLOCK EXAMPLE (do not run)
//
// Thread A: locks lock1, then waits for lock2
// Thread B: locks lock2, then waits for lock1
//
// Both threads wait forever!
//
// let lock1 = Arc::new(Mutex::new(0));
// let lock2 = Arc::new(Mutex::new(0));
//
// let (l1, l2) = (Arc::clone(&lock1), Arc::clone(&lock2));
// thread::spawn(move || {
//     let _a = l1.lock().unwrap();
//     thread::sleep(Duration::from_secs(1));
//     let _b = l2.lock().unwrap(); // waits for lock2
// });
//
// let _a = lock2.lock().unwrap();
// let _b = lock1.lock().unwrap(); // waits for lock1 - DEADLOCK!
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::Duration;
fn main() {
    // SAFE: keep critical sections small
    let counter = Arc::new(Mutex::new(0));
    let mut handles = Vec::new();

    for id in 0..5 {
        let counter = Arc::clone(&counter);
        handles.push(thread::spawn(move || {
            // Expensive work happens before taking the lock...
            thread::sleep(Duration::from_millis(10));

            // ...so the lock is held only for the increment itself.
            {
                let mut guard = counter.lock().unwrap();
                *guard += 1;
            } // Guard dropped: lock released immediately.

            // More work after the lock is gone.
            thread::sleep(Duration::from_millis(10));
            println!("Thread {} done", id);
        }));
    }

    for handle in handles {
        handle.join().unwrap();
    }
}
Always acquire locks in the same order across all threads. Keep critical sections as small as possible. Prefer message passing (channels) over shared state when you can. Use try_lock() for non-blocking lock attempts.
A common pattern is splitting data into chunks, processing them in parallel across threads, and collecting the results via channels.
use std::sync::mpsc;
use std::thread;
/// Sums the integers in `data`; an empty chunk sums to 0.
fn process_chunk(data: Vec<i32>) -> i32 {
    data.iter().sum()
}
fn main() {
    let data = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
    let chunk_size = 3;
    let (tx, rx) = mpsc::channel();

    // One worker per chunk; each sends its partial sum down the channel.
    let handles: Vec<_> = data
        .chunks(chunk_size)
        .map(|chunk| {
            let chunk = chunk.to_vec();
            let tx = tx.clone();
            thread::spawn(move || {
                tx.send(process_chunk(chunk)).unwrap();
            })
        })
        .collect();

    // Without this drop, the receiver iterator below would never end.
    drop(tx);

    let total: i32 = rx.iter().sum();
    println!("Total: {}", total); // 55

    for handle in handles {
        handle.join().unwrap();
    }
}
use std::sync::{Arc, Mutex};
use std::thread;
// Simple thread pool sketch
struct ThreadPool {
    // Intended worker count. This sketch spawns a fresh thread per task,
    // so the field is illustrative only.
    size: usize,
}

impl ThreadPool {
    /// Creates a pool sketch that remembers the requested size.
    fn new(size: usize) -> ThreadPool {
        ThreadPool { size }
    }

    /// Runs `f` on a newly spawned thread. A real pool would dispatch to
    /// `self.size` long-lived workers instead of spawning per call, and
    /// would keep the JoinHandle so callers can wait for completion.
    fn execute<F>(&self, f: F)
    where
        F: FnOnce() + Send + 'static,
    {
        thread::spawn(f);
    }
}
fn main() {
    let pool = ThreadPool::new(4);
    // Shared counter each task adds its index into.
    let counter = Arc::new(Mutex::new(0));
    for i in 0..10 {
        let counter = Arc::clone(&counter);
        pool.execute(move || {
            let mut num = counter.lock().unwrap();
            *num += i;
            println!("Processing item {}", i);
        });
    }
    // NOTE(review): sleeping is a timing hack, not synchronization — the
    // sketch pool returns no JoinHandles, so there is nothing to join.
    // If any task takes longer than 1s, the count printed below is wrong.
    thread::sleep(std::time::Duration::from_secs(1));
    println!("Final count: {}", *counter.lock().unwrap());
}
Rust's ownership and type system guarantee at compile time that your concurrent code is free of data races. If it compiles, it's safe. For production thread pools, consider the rayon crate which provides parallel iterators with minimal boilerplate.