Pseudocode to accompany “Preemption Adaptivity in Time-Published Queue-Based Spin Locks,” by Bijun He, William N. Scherer III, and Michael L. Scott, presented at HiPC'05 (an earlier, extended version is available as URCS technical report 867). Copyright © 2005, University of Rochester. Winner, Best Paper award.
The key idea is for every thread to periodically record its current timestamp to a shared memory location. Given the high-resolution, roughly synchronized clocks of modern processors, this convention allows threads to guess accurately which peers are active, based on how recently those peers published their timestamps. Experimental results indicate that time-published locks make it feasible, for the first time, to use queue-based spin locks on multiprogrammed systems with a standard kernel interface.
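As a concrete illustration (this sketch is not taken from the paper or the released code; thread_slot, publish, and probably_active are placeholder names, while hrtime_t, gethrtime(), and UPDATE_DELAY are used as in the listings below), the heuristic in its simplest form has each thread republish its clock while it spins, and lets a peer treat any timestamp older than UPDATE_DELAY as evidence that the thread is probably preempted:

    // Illustrative sketch only.
    typedef struct {
        volatile hrtime_t published_time;   // written only by the owning thread
    } thread_slot;

    // Called by a thread from inside its spin loop.
    static inline void publish(thread_slot *self) {
        self->published_time = gethrtime();
    }

    // Called by a peer: has this thread published a timestamp recently
    // enough that it is probably still running?
    static inline int probably_active(const thread_slot *peer) {
        return gethrtime() - peer->published_time <= UPDATE_DELAY;
    }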
The MCS-TP lock is based on the original MCS queue-based lock, due to Mellor-Crummey and Scott. It can be expected to perform well on any coherent shared-memory machine, with or without caching of remote locations. It strictly respects specified timeouts, and ensures that queues never exceed T nodes in length, where T is the number of active threads. In the worst case, however, lock handoff may also be O(T).
The CLH-TP lock is derived from our CLH-NB try lock, which in turn is based on the CLH queue-based lock, due to Craig and to Landin and Hagersten. It provides O(1) lock handoff, but O(T) timeout and O(T^2) queue length, worst case. Both algorithms have constant-time handoff, constant-time timeout, and linear queue length in the common case. Both require space management code for dynamic allocation and reclamation of queue nodes. To avoid the ABA problem, the code must ensure that the space occupied by a reclaimed queue node is never reallocated by a different thread.
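One simple way to meet this requirement (a sketch under our own assumptions, not the allocator from the released code; qnode_hdr, my_free_list, alloc_qnode_local, and free_qnode_local are hypothetical names) is to give every thread a private free list, so that a node is handed out again only by the thread that reclaimed it:

    #include <stdlib.h>

    typedef struct qnode_hdr {
        struct qnode_hdr *free_next;    // link within the owner's free list
        // ... lock-specific fields follow ...
    } qnode_hdr;

    static __thread qnode_hdr *my_free_list = NULL;   // per-thread, never shared

    static qnode_hdr *alloc_qnode_local(void) {
        if (my_free_list) {
            qnode_hdr *n = my_free_list;
            my_free_list = n->free_next;
            return n;                   // reused only by the thread that freed it
        }
        return (qnode_hdr *) malloc(sizeof(qnode_hdr));
    }

    static void free_qnode_local(qnode_hdr *n) {
        n->free_next = my_free_list;    // return to this thread's list only
        my_free_list = n;
    }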
typedef struct mcstp_qnode {
    mcstp_lock *last_lock;              // lock from last attempt
    volatile hrtime_t time;             // published timestamp
    volatile qnode_status status;
    struct mcstp_qnode *volatile next;
} mcstp_qnode;

typedef struct mcstp_lock {
    mcstp_qnode *volatile tail;
    volatile hrtime_t cs_start_time;
} mcstp_lock;

#define mtp_swap(p,v) ((mcstp_qnode *) \
    swap((volatile unsigned long *)(p), (unsigned long)(v)))

#define compare_and_store(p,o,n) \
    (cas((volatile unsigned long *) (p), \
         (unsigned long) (o), (unsigned long) (n)) \
     == (unsigned long) (o))

extern int MAX_CS_TIME;     // Approximate upper bound on the length
                            // of a critical section.
extern int MAX_THREADS;     // Approximate max number of threads
                            // in the system.
extern int UPDATE_DELAY;    // Approximate length of time it takes a
                            // thread to see a timestamp published by
                            // another thread, including any potential
                            // clock skew.

bool mcstp_acquire(mcstp_lock *L, mcstp_qnode *I, hrtime_t T)
{
    mcstp_qnode *pred;
    hrtime_t start_time = CUR_TIME;

    // try to reclaim position in queue
    if (I->status != timedout || I->last_lock != L ||
            !compare_and_store(&I->status, timedout, waiting)) {
        I->status = waiting;
        I->next = 0;
        pred = mtp_swap(&L->tail, I);
        if (!pred) {                    // lock was free
            L->cs_start_time = gethrtime();
            return true;
        } else
            pred->next = I;
    }

    while (1) {
        if (I->status == available) {
            L->cs_start_time = gethrtime();
            return true;
        } else if (I->status == failed) {
            if (CUR_TIME - L->cs_start_time > MAX_CS_TIME)
                yield();
            I->last_lock = L;
            return false;
        }
        while (I->status == waiting) {
            I->time = gethrtime();      // publish our timestamp
            if (CUR_TIME - start_time <= T)
                continue;
            if (!compare_and_store(&I->status, waiting, timedout)) {
                I->last_lock = L;
                break;
            }
            if (CUR_TIME - L->cs_start_time > MAX_CS_TIME)
                yield();
            return false;
        }
    }
}

void mcstp_release(mcstp_lock *L, mcstp_qnode *I)
{
    int scanned_nodes = 0;
    mcstp_qnode *succ, *curr = I, *last = NULL;

    while (1) {
        succ = curr->next;
        if (!succ) {
            if (compare_and_store(&L->tail, curr, 0)) {
                curr->status = failed;
                return;                 // I was last in line.
            }
            while (!succ) succ = curr->next;
        }
        if (++scanned_nodes < MAX_THREADS)
            curr->status = failed;
        else if (!last)
            last = curr;                // handle treadmill case
        if (succ->status == waiting) {
            hrtime_t succ_time = succ->time;
            if ((CUR_TIME - succ_time <= UPDATE_DELAY) &&
                    compare_and_store(&succ->status, waiting, available)) {
                for ( ; last && last != curr; last = last->next)
                    last->status = failed;
                return;
            }
        }
        curr = succ;
    }
}
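A call site for the MCS-TP lock might look like the following sketch; the per-thread qnode my_qnode, the lock the_lock, and the patience bound PATIENCE are illustrative assumptions, not part of the released interface:

    extern mcstp_lock the_lock;
    static __thread mcstp_qnode my_qnode;   // persists across attempts so that a
                                            // timed-out queue position can be reclaimed
    #define PATIENCE ((hrtime_t) 1000000)   // caller-chosen timeout, in ticks

    void increment(volatile long *counter)
    {
        if (mcstp_acquire(&the_lock, &my_qnode, PATIENCE)) {
            *counter += 1;                  // critical section
            mcstp_release(&the_lock, &my_qnode);
        } else {
            // timed out (or the holder appears preempted):
            // back off, retry, or fall back to a blocking lock
        }
    }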
int cas_w_waiting(node_t *volatile *addr,
                  unsigned long oldv, unsigned long newv,
                  node_t *volatile *me)
{
    // CAS that succeeds only while our own node is still waiting.
    do {
        unsigned long tmp = LL(addr);
        if (tmp != oldv || !is_waiting(me))
            return 0;
    } while (!SC(addr, newv));
    return 1;
}

// Atomic operation that saves the old value of swap_addr in set_addr
// and swaps new_ptr into swap_addr; returns the old value.
clhtp_qnode *clhtp_swap_and_set(clhtp_qnode *volatile *swap_addr,
                                clhtp_qnode *new_ptr,
                                clhtp_qnode *volatile *set_addr)
{
    unsigned long pred;
    do {
        pred = LL(swap_addr);
        (*set_addr) = (clhtp_qnode *) pred;
    } while (0 == SC(swap_addr, new_ptr));
    return (clhtp_qnode *) pred;
}

// Atomic compare-and-swap on the tag bits of the pointer.
bool clhtp_tag_cas_bool(clhtp_qnode *volatile *p,
                        unsigned long oldtag, unsigned long newtag)
{
    unsigned long oldv, newv;
    do {
        oldv = LL(p);
        if (get_tagbits(oldv) != oldtag)
            return false;
        newv = replace_tag(oldv, newtag);
    } while (!SC(p, newv));
    return true;
}

// CAS *ptr from oldp to newv, but only while *stateptr is still
// tagged WAITING.
bool clhtp_rcas_bool(clhtp_qnode *volatile *stateptr,
                     clhtp_qnode *volatile *ptr,
                     clhtp_qnode *oldp, unsigned long newv)
{
    unsigned long oldv = (unsigned long) oldp;
    do {
        unsigned long tmp = LL(ptr);
        if (get_tagbits(*stateptr) != WAITING)
            return false;
        if (tmp != oldv)
            return true;
    } while (0 == SC(ptr, newv));
    return true;
}

void clhtp_failure_epilogue(clhtp_lock *L, clhtp_qnode *I)
{
    // Reclaim our node ourselves unless we can hand it off
    // (PTIMEOUT -> SUCRC) for a successor to reclaim.
    if (I->prev == SELFRC ||
            !clhtp_tag_cas_bool(&I->prev, PTIMEOUT, SUCRC)) {
        free_clhtp_qnode(I);
    }
}

void clhtp_success_epilogue(clhtp_lock *L, clhtp_qnode *I,
                            clhtp_qnode *pred)
{
    L->lock_holder = I;
    L->cs_start_time = CUR_TIME;
    free_clhtp_qnode(pred);
}

bool clhtp_acquire(clhtp_lock *L, hrtime_t T)
{
    clhtp_qnode *I = alloc_qnode();
    clhtp_qnode *pred;

    I->time = CUR_TIME;
    pred = clhtp_swap_and_set(&L->tail, I, &I->prev);
    if (pred->prev == AVAILABLE) {
        if (compare_and_store(&I->prev, pred, HOLDING)) {
            clhtp_success_epilogue(L, I, pred);
            return true;
        } else {
            clhtp_failure_epilogue(L, I);
            if (CUR_TIME - L->cs_start_time > MAX_CSTICKS)
                yield();
            return false;
        }
    }

    bool result = clhtp_acquire_slow_path(L, T, I, pred);
    if (!result)
        if (CUR_TIME - L->cs_start_time > MAX_CSTICKS)
            yield();
    return result;
}

bool clhtp_acquire_slow_path(clhtp_lock *L, hrtime_t T,
                             clhtp_qnode *I, clhtp_qnode *pred)
{
    hrtime_t my_start_time, current, pred_time;
    unsigned int my_tag;

    my_start_time = I->time;
    pred_time = pred->time;

    while (true) {
        clhtp_qnode *pred_pred;
        current = gethrtime();
        I->time = current;              // publish our own timestamp
        pred_pred = pred->prev;

        if (pred_pred == AVAILABLE) {
            if (compare_and_store(&I->prev, pred, HOLDING))
                goto label_success;
            goto label_failure;
        } else if (pred_pred == SELFRC) {
            goto label_self_rc;
        } else if (pred_pred == HOLDING || pred_pred == INITIAL) {
            goto check_self;
        } else {
            clhtp_qnode *pp_ptr = get_ptr(pred_pred);
            unsigned int pred_tag = get_tagbits(pred_pred);

            if (pred_tag == SUCRC) {
                // Predecessor left the queue and charged us with
                // reclaiming its node; splice past it.
                if (!CAS_BOOL(&I->prev, pred, pp_ptr))
                    goto label_failure;
                free_clhtp_qnode(pred);
                pred = pp_ptr;
                pred_time = pred->time;
                continue;
            } else if (pred_tag == PTIMEOUT) {
                // We marked the predecessor as preempted; splice past it
                // and mark it for self-reclamation, else reclaim it ourselves.
                if (!compare_and_store(&I->prev, pred, pp_ptr))
                    goto label_failure;
                if (!compare_and_store(&pred->prev, pred_pred, SELFRC))
                    free_clhtp_qnode(pred);
                pred = pp_ptr;
                pred_time = pred->time;
                continue;
            } else if (pred_tag == WAITING) {
                if (current - pred_time > UPDATE_DELAY) {
                    // Predecessor's published timestamp is stale;
                    // presume it has been preempted.
                    if (pred->time != pred_time) {
                        pred_time = pred->time;
                        continue;
                    } else if (clhtp_rcas_bool(&I->prev, &pred->prev,
                                   pred_pred,
                                   tagged_wptr(pred_pred, PTIMEOUT)))
                        continue;
                }
            }
        }

check_self:
        pred = I->prev;
        if (pred == SELFRC)
            goto label_self_rc;
        my_tag = get_tagbits(pred);
        if (my_tag == PTIMEOUT)
            goto label_failure;
        else if (my_tag == WAITING) {
            if (current - my_start_time > T)    // patience exhausted
                goto label_self_timeout;
        }
    }

label_success:
    clhtp_success_epilogue(L, I, pred);
    return true;

label_failure:
label_self_rc:
    clhtp_failure_epilogue(L, I);
    return false;

label_self_timeout:
    if (!compare_and_store(&I->prev, pred,
                           tagged_wptr(pred, SUCRC))) {
        clhtp_failure_epilogue(L, I);
        return false;
    }
    return false;       // successor will reclaim our node
}

void clhtp_try_release(clhtp_lock *L)
{
    clhtp_qnode *I = L->lock_holder;
    I->prev = AVAILABLE;
}
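Usage of the CLH-TP lock is analogous, except that qnodes are allocated and reclaimed internally; as before, the_lock and PATIENCE in this sketch are illustrative assumptions rather than part of the released interface:

    extern clhtp_lock the_lock;
    #define PATIENCE ((hrtime_t) 1000000)   // caller-chosen timeout, in ticks

    void increment(volatile long *counter)
    {
        if (clhtp_acquire(&the_lock, PATIENCE)) {
            *counter += 1;                  // critical section
            clhtp_try_release(&the_lock);
        } else {
            // timed out, or removed from the queue as presumed preempted
        }
    }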