-
Notifications
You must be signed in to change notification settings - Fork 1
/
comb.rs
416 lines (357 loc) · 13 KB
/
comb.rs
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
//! Detectable Combining Operation
#![allow(missing_docs)]
use crate::pepoch::PAtomic;
use crate::ploc::Handle;
use crate::pmem::{persist_obj, Collectable, GarbageCollection, PoolHandle};
use crate::Memento;
use array_init::array_init;
use crossbeam_epoch::Guard;
use crossbeam_utils::{Backoff, CachePadded};
use libc::c_void;
use mmt_derive::Collectable;
use std::sync::atomic::{AtomicUsize, Ordering};
use self::combining_lock::CombiningLock;
pub const MAX_THREADS: usize = 64;
const COMBINING_ROUNDS: usize = 20;
/// restriction of combining iteration
pub static mut NR_THREADS: usize = MAX_THREADS;
/// Node
#[derive(Debug, Collectable)]
#[repr(align(128))]
pub struct Node {
pub data: usize,
pub next: PAtomic<Node>,
}
/// Trait for Memento
pub trait Combinable: Memento {
// checkpoint activate of request
fn chk_activate(&mut self, activate: usize, handle: &Handle) -> usize;
fn peek_retval(&mut self) -> usize;
fn backup_retval(&mut self, return_value: usize);
}
/// request obj
#[derive(Default, Debug, Collectable)]
pub struct CombRequest {
arg: AtomicUsize,
activate: AtomicUsize,
}
/// state obj
#[derive(Debug)]
pub struct CombStateRec {
pub data: PAtomic<c_void>, // The actual data of the state (e.g. tail for enqueue, head for dequeue)
return_value: [usize; MAX_THREADS + 1],
deactivate: [AtomicUsize; MAX_THREADS + 1],
}
impl CombStateRec {
pub fn new<T>(data: PAtomic<T>) -> Self {
Self {
data: unsafe { (&data as *const _ as *const PAtomic<c_void>).read() },
return_value: array_init(|_| Default::default()),
deactivate: array_init(|_| Default::default()),
}
}
}
impl Collectable for CombStateRec {
fn filter(s: &mut Self, tid: usize, gc: &mut GarbageCollection, pool: &mut PoolHandle) {
Collectable::filter(&mut s.data, tid, gc, pool);
}
}
impl Clone for CombStateRec {
fn clone(&self) -> Self {
Self {
data: self.data.clone(),
return_value: array_init(|i| self.return_value[i]),
deactivate: array_init(|i| {
AtomicUsize::new(self.deactivate[i].load(Ordering::Relaxed))
}),
}
}
}
/// per-thread state for combining
#[derive(Debug)]
pub struct CombThreadState {
index: AtomicUsize,
state: [PAtomic<CombStateRec>; 2],
}
impl CombThreadState {
pub fn new<T>(data: PAtomic<T>, pool: &PoolHandle) -> Self {
Self {
index: Default::default(),
state: array_init(|_| PAtomic::new(CombStateRec::new(data.clone()), pool)),
}
}
}
impl Collectable for CombThreadState {
fn filter(s: &mut Self, tid: usize, gc: &mut GarbageCollection, pool: &mut PoolHandle) {
Collectable::filter(&mut s.state[0], tid, gc, pool);
Collectable::filter(&mut s.state[1], tid, gc, pool);
}
}
/// Central object for combining
#[allow(missing_debug_implementations)]
pub struct CombStruct {
// General func for additional behavior: e.g. persist enqueued nodes
final_func: Option<&'static dyn Fn(&CombStruct, &Guard, &PoolHandle)>,
after_func: Option<&'static dyn Fn(&CombStruct, &Guard, &PoolHandle)>,
// Variables located at volatile location
lock: &'static CachePadded<CombiningLock>,
// Variables located at persistent location
request: [CachePadded<CombRequest>; MAX_THREADS + 1], // per-thread requests
pub pstate: CachePadded<PAtomic<CombStateRec>>, // stable state
}
impl Collectable for CombStruct {
fn filter(s: &mut Self, tid: usize, gc: &mut GarbageCollection, pool: &mut PoolHandle) {
for t in 0..s.request.len() {
Collectable::filter(&mut *s.request[t], tid, gc, pool);
}
Collectable::filter(&mut *s.pstate, tid, gc, pool);
}
}
impl CombStruct {
pub fn new(
final_func: Option<&'static dyn Fn(&CombStruct, &Guard, &PoolHandle)>,
after_func: Option<&'static dyn Fn(&CombStruct, &Guard, &PoolHandle)>,
lock: &'static CachePadded<CombiningLock>,
request: [CachePadded<CombRequest>; MAX_THREADS + 1],
pstate: CachePadded<PAtomic<CombStateRec>>,
) -> Self {
Self {
final_func,
after_func,
lock,
request,
pstate,
}
}
}
#[derive(Debug)]
pub struct Combining {}
impl Combining {
// sfunc: (state data (head or tail), arg, tid, guard, pool) -> return value
pub fn apply_op<M: Combinable>(
arg: usize,
(s, st_thread, sfunc): (
&CombStruct,
&CombThreadState,
&dyn Fn(&PAtomic<c_void>, usize, &Handle) -> usize,
),
mmt: &mut M,
handle: &Handle,
) -> usize {
let (tid, guard, pool) = (handle.tid, &handle.guard, handle.pool);
// Checkpoint activate
let activate =
mmt.chk_activate(s.request[tid].activate.load(Ordering::Relaxed) + 1, handle);
// Check if my request was already performed.
if handle.rec.load(Ordering::Relaxed) {
let latest_state = unsafe { s.pstate.load(Ordering::SeqCst, guard).deref(pool) };
let deactivate = latest_state.deactivate[tid].load(Ordering::SeqCst);
if activate < deactivate {
return mmt.peek_retval();
}
// if i was a combiner, i have to finalize the combine.
if activate == deactivate && !s.lock.is_owner(tid) {
let retval = latest_state.return_value[tid];
mmt.backup_retval(retval);
return retval;
}
if activate < s.request[tid].activate.load(Ordering::Relaxed) {
return mmt.peek_retval();
}
handle.rec.store(false, Ordering::Relaxed);
}
// Register request
s.request[tid].arg.store(arg, Ordering::Relaxed);
s.request[tid].activate.store(activate, Ordering::Release);
// Do
loop {
match s.lock.try_lock(tid) {
Ok(_) => return Self::do_combine((s, st_thread, sfunc), mmt, handle),
Err(_) => {
if let Ok(retval) = Self::do_non_combine(s, mmt, handle.tid) {
return retval;
}
}
}
}
}
/// combiner performs the requests
///
/// 1. ready: copy central state to my thread-local state, pt.state[pt.index]
/// 2. perform: update pt.state[pt.index]
/// 3. finalize:
/// 3.1. central state = pt.state[pt.index] (commit point)
/// 3.2. pt.index = 1 - pt.index
/// 3.3. release lock
fn do_combine<M: Combinable>(
(s, st_thread, sfunc): (
&CombStruct,
&CombThreadState,
&dyn Fn(
// This is data of CombStruct. (e.g. `tail` for Enqueue combiner, `head` for Dequeue combiner)
// Combiner will give stable value for this argument using old/new flipping logic.
&PAtomic<c_void>,
// Arugment
// Combiner will give stable value for this argument using old/new flipping logic.
usize,
&Handle,
) -> usize,
),
mmt: &mut M,
handle: &Handle,
) -> usize {
let (tid, guard, pool) = (handle.tid, &handle.guard, handle.pool);
// ready
let ind = st_thread.index.load(Ordering::Relaxed);
let mut new_state = st_thread.state[ind].load(Ordering::Relaxed, guard);
let new_state_ref = unsafe { new_state.deref_mut(pool) };
*new_state_ref = unsafe { s.pstate.load(Ordering::Relaxed, guard).deref(pool) }.clone(); // create a copy of current state
// perform requests
for _ in 0..COMBINING_ROUNDS {
let mut serve_reqs = 0;
for t in 1..unsafe { NR_THREADS } + 1 {
let t_activate = s.request[t].activate.load(Ordering::Acquire);
if t_activate > new_state_ref.deactivate[t].load(Ordering::Relaxed) {
new_state_ref.return_value[t] = sfunc(
&new_state_ref.data,
s.request[t].arg.load(Ordering::Relaxed),
handle,
);
new_state_ref.deactivate[t].store(t_activate, Ordering::Release);
// cnt
serve_reqs += 1;
}
}
if serve_reqs == 0 {
break;
}
}
// e.g. enqueue: persist all enqueued node
if let Some(func) = s.final_func {
func(s, guard, pool);
}
persist_obj(new_state_ref, true);
// 3.1 central state = pt.state[pt.index] (commit point)
s.pstate.store(new_state, Ordering::Release);
persist_obj(&*s.pstate, true);
// e.g. enqueue: update old tail
if let Some(func) = s.after_func {
func(s, guard, pool);
}
// 3.2. flip per-thread index
st_thread.index.store(1 - ind, Ordering::Relaxed);
// 3.3. release lock with new state
unsafe { s.lock.unlock(new_state_ref as *const _ as usize) };
let retval = new_state_ref.return_value[tid];
mmt.backup_retval(retval);
retval
}
/// non-combiner (1) wait until combiner unlocks the lock and (2) check if my request was performed (3) return
fn do_non_combine<M: Combinable>(
// &self,
s: &CombStruct,
mmt: &mut M,
tid: usize,
) -> Result<usize, ()> {
// wait until the combiner unlocks the lock
let backoff = Backoff::new();
let mut combined_ptr;
let mut combined_tid;
loop {
(combined_ptr, combined_tid) = s.lock.peek();
if combined_tid == 0 {
break;
}
#[cfg(not(feature = "pmcheck"))]
backoff.snooze();
#[cfg(feature = "pmcheck")]
println!("[do_non_combine] ptr: {combined_ptr}, tid: {combined_tid}");
}
// check if my request was performed
let lastest_state = unsafe { (combined_ptr as *const CombStateRec).as_ref().unwrap() };
if s.request[tid].activate.load(Ordering::Relaxed)
<= lastest_state.deactivate[tid].load(Ordering::Acquire)
{
let retval = lastest_state.return_value[tid];
mmt.backup_retval(retval);
return Ok(retval);
}
Err(())
}
}
pub mod combining_lock {
//! Thread-recoverable lock for combining
use core::sync::atomic::Ordering;
use std::sync::atomic::AtomicUsize;
use crate::impl_left_bits;
// Auxiliary Bits
// aux bits: MSB 55-bit in 64-bit
// Used for:
// - Comb: Indicating ptr of combined state
pub(crate) const POS_AUX_BITS: u32 = 0;
pub(crate) const NR_AUX_BITS: u32 = 55;
impl_left_bits!(aux_bits, POS_AUX_BITS, NR_AUX_BITS, usize);
#[inline]
fn compose_aux_bit(aux: usize, data: usize) -> usize {
(aux_bits() & (aux.rotate_right(POS_AUX_BITS + NR_AUX_BITS))) | (!aux_bits() & data)
}
#[inline]
fn decompose_aux_bit(data: usize) -> (usize, usize) {
(
(data & aux_bits()).rotate_left(POS_AUX_BITS + NR_AUX_BITS),
!aux_bits() & data,
)
}
/// thread-recoverable spin lock
#[derive(Debug, Default)]
pub struct CombiningLock {
inner: AtomicUsize, // 55:ptr of state, 9:tid occupying the lock
}
impl CombiningLock {
const PTR_NULL: usize = 0;
const RELEASED: usize = 0;
/// Try lock
///
/// return Ok: (seq, guard)
/// return Err: (seq, tid)
pub fn try_lock(&self, tid: usize) -> Result<(), (usize, usize)> {
let current = self.inner.load(Ordering::Relaxed);
let (_ptr, _tid) = decompose_aux_bit(current);
if self.is_owner(tid) {
return Ok(());
}
if _tid != Self::RELEASED {
return Err((_ptr, _tid));
}
self.inner
.compare_exchange(
current,
compose_aux_bit(Self::PTR_NULL, tid),
Ordering::Acquire,
Ordering::Relaxed,
)
.map(|_| ())
.map_err(|_| (_ptr, _tid))
}
/// peek
///
/// return (ptr, tid)
pub fn peek(&self) -> (usize, usize) {
decompose_aux_bit(self.inner.load(Ordering::Acquire))
}
pub fn is_owner(&self, tid: usize) -> bool {
let current = self.inner.load(Ordering::Relaxed);
let (_, _tid) = decompose_aux_bit(current);
tid == _tid
}
/// unlock
///
/// # Safety
///
/// Only the thread who get `Ok()` as return value from `try_lock()` should call `unlock()`. Also, that thread shouldn't call twice or more for one `try_lock()`.
pub unsafe fn unlock(&self, ptr: usize) {
self.inner
.store(compose_aux_bit(ptr, Self::RELEASED), Ordering::Release);
}
}
}