#include <UndoLog.hpp>
stm::ByteLoggingUndoLogEntry::ByteLoggingUndoLogEntry (void ** paddr, void * pval, uintptr_t pmask)
|
inline static void stm::ByteLoggingUndoLogEntry::DoMaskedWrite (void ** addr, void * val, uintptr_t mask)
|
inline static bool stm::ByteLoggingUndoLogEntry::filter (void ** lower, void ** upper)
|
inline |
The bytelog implementation of the filter operation supports any sort of intersection.
bool stm::ByteLoggingUndoLogEntry::filterSlow (void ** lower, void ** upper)
|
private |
We outline the slowpath filter. If this /ever/ happens it will be such a corner case that it just doesn't matter. Plus, this is an abort path anyway... consider it a contention management technique.
void stm::ByteLoggingUndoLogEntry::undo () const
|
inline |
void** stm::ByteLoggingUndoLogEntry::addr
uint8_t* stm::ByteLoggingUndoLogEntry::byte_addr
uint8_t stm::ByteLoggingUndoLogEntry::byte_mask[sizeof(uintptr_t)]
uint8_t stm::ByteLoggingUndoLogEntry::byte_val[sizeof(void *)]
uintptr_t stm::ByteLoggingUndoLogEntry::mask
void* stm::ByteLoggingUndoLogEntry::val
The documentation for this struct was generated from the following files: