tlds
Transactional Operations for Linked Data Structures
locks.hpp
/**
 * Copyright (C) 2011
 * University of Rochester Department of Computer Science
 *   and
 * Lehigh University Department of Computer Science and Engineering
 *
 * License: Modified BSD
 * Please see the file LICENSE.RSTM for licensing information
 */

/**
 * Given all the atomic operations we defined in platform.hpp, we can now
 * declare the lock types that we need for implementing some single-lock
 * based STMs.
 */

#ifndef LOCKS_HPP__
#define LOCKS_HPP__

#include "common/platform.hpp"

/**
 * Tune backoff parameters
 *
 * NB: At some point (probably mid 2010), these values were experimentally
 *     verified to provide good performance for some workload using TATAS
 *     locks. Whether they are good values anymore is an open question.
 */
#if defined(STM_CPU_SPARC)
#define MAX_TATAS_BACKOFF 4096
#else // (STM_CPU_X86)
#define MAX_TATAS_BACKOFF 524288
#endif

/*** Issue 64 nops to provide a little busy waiting */
inline void spin64()
{
    for (int i = 0; i < 64; i++)
        nop();
}

/*** exponential backoff for TATAS locks */
inline void backoff(int *b)
{
    for (int i = *b; i; i--)
        nop();
    if (*b < MAX_TATAS_BACKOFF)
        *b <<= 1;
}

/*** TATAS lock: test-and-test-and-set with exponential backoff */
typedef volatile uintptr_t tatas_lock_t;

/*** Slowpath TATAS acquire. This performs exponential backoff */
inline int tatas_acquire_slowpath(tatas_lock_t* lock)
{
    int b = 64;
    do {
        backoff(&b);
    } while (tas(lock));
    return b;
}

/**
 * Fastpath TATAS acquire. The return value is how long we spent spinning
 */
inline int tatas_acquire(tatas_lock_t* lock)
{
    if (!tas(lock))
        return 0;
    return tatas_acquire_slowpath(lock);
}

/*** TATAS release: ordering is safe for SPARC, x86 */
inline void tatas_release(tatas_lock_t* lock)
{
    CFENCE;
    *lock = 0;
}

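// Illustrative sketch (not part of the original header): a minimal critical
// section built from the TATAS primitives above. Assumes the lock word is
// zero-initialized; the function and parameter names here are hypothetical.
inline void tatas_example(tatas_lock_t* lock, int* shared_counter)
{
    int spins = tatas_acquire(lock); // fast path is a single tas(); contention backs off
    (*shared_counter)++;             // critical section
    tatas_release(lock);             // CFENCE, then a plain store of 0
    (void)spins;                     // spin count could feed an adaptive policy
}
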
/**
 * Ticket lock: this is the easiest implementation possible, but might not be
 * optimal w.r.t. cache misses
 */
struct ticket_lock_t
{
    volatile uintptr_t next_ticket;
    volatile uintptr_t now_serving;
};

/**
 * Acquisition of a ticket lock entails an increment, then a spin. We use a
 * counter to measure how long we spend spinning, in case that information
 * is useful to adaptive STM mechanisms.
 */
inline int ticket_acquire(ticket_lock_t* lock)
{
    int ret = 0;
    uintptr_t my_ticket = faiptr(&lock->next_ticket);
    while (lock->now_serving != my_ticket)
        ret++;
    return ret;
}

/*** Release the ticket lock */
inline void ticket_release(ticket_lock_t* lock)
{
    lock->now_serving += 1;
}

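// Illustrative sketch (not part of the original header): guarding a shared
// counter with a ticket lock. Assumes the ticket_lock_t is zero-initialized,
// so that ticket 0 is served immediately; the names here are hypothetical.
inline void ticket_example(ticket_lock_t* lock, int* shared_counter)
{
    int spins = ticket_acquire(lock); // take a ticket, spin until now_serving matches
    (*shared_counter)++;              // critical section
    ticket_release(lock);             // advance now_serving to admit the next waiter
    (void)spins;                      // spin count could feed an adaptive policy
}
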
/*** Simple MCS lock implementation */
struct mcs_qnode_t
{
    volatile bool flag;
    volatile mcs_qnode_t* volatile next;
};

/**
 * MCS acquire. We count how long we spin, in order to detect very long
 * delays
 */
inline int mcs_acquire(mcs_qnode_t** lock, mcs_qnode_t* mine)
{
    // init my qnode, then swap it into the root pointer
    mine->next = 0;
    mcs_qnode_t* pred = (mcs_qnode_t*)atomicswapptr(lock, mine);

    // now set my flag, point pred to me, and wait for my flag to be unset
    int ret = 0;
    if (pred != 0) {
        mine->flag = true;
        pred->next = mine;
        while (mine->flag) { ret++; } // spin
    }
    return ret;
}

/*** MCS release */
inline void mcs_release(mcs_qnode_t** lock, mcs_qnode_t* mine)
{
    // if my node is the only one, then if I can zero the lock, do so and I'm
    // done
    if (mine->next == 0) {
        if (bcasptr(lock, mine, static_cast<mcs_qnode_t*>(NULL)))
            return;
        // uh-oh, someone arrived while I was zeroing... wait for arriver to
        // initialize, fall out to other case
        while (mine->next == 0) { } // spin
    }
    // other case: someone is waiting on me... set their flag to let them start
    mine->next->flag = false;
}

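// Illustrative sketch (not part of the original header): typical MCS usage
// with a per-acquisition queue node. The node must remain valid until
// mcs_release returns; the names here are hypothetical.
inline void mcs_example(mcs_qnode_t** mcs_root, int* shared_counter)
{
    mcs_qnode_t me;             // this thread's queue node for this acquisition
    mcs_acquire(mcs_root, &me); // swap into the tail, then spin locally on me.flag
    (*shared_counter)++;        // critical section
    mcs_release(mcs_root, &me); // clear the root, or signal the waiting successor
}
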
#endif // LOCKS_HPP__