
change 'seek lock' from mutex to atomic spinlock #6

Draft · wants to merge 1 commit into base: main
src/SQLiteNestedVFS.h (43 changes: 34 additions & 9 deletions)
@@ -35,6 +35,35 @@
#endif

namespace SQLiteNested {
// from https://rigtorp.se/spinlock/
struct spinlock {
    std::atomic<bool> lock_ = {false};

    void lock() noexcept {
        for (;;) {
            // Optimistically assume the lock is free on the first try
            if (!lock_.exchange(true, std::memory_order_acquire)) {
                return;
            }
            // Wait for the lock to be released without generating cache misses
            while (lock_.load(std::memory_order_relaxed)) {
                // Spin-wait hint to reduce contention between hyper-threads:
                // x86 PAUSE / ARM YIELD, falling back to an OS yield elsewhere
#if defined(__x86_64__) || defined(__i386__)
                __builtin_ia32_pause();
#elif defined(__aarch64__)
                __asm__ __volatile__("yield");
#else
                std::this_thread::yield();
#endif
            }
        }
    }

    bool try_lock() noexcept {
        // First do a relaxed load to check if the lock is free, to prevent
        // unnecessary cache misses if someone does while(!try_lock())
        return !lock_.load(std::memory_order_relaxed) &&
               !lock_.exchange(true, std::memory_order_acquire);
    }

    void unlock() noexcept { lock_.store(false, std::memory_order_release); }
};
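Since the new struct provides lock(), try_lock(), and unlock(), it satisfies the standard Lockable requirements and therefore drops into std::unique_lock and std::lock_guard exactly as std::mutex did; that is what lets the rest of this diff change only the template argument. A minimal standalone sketch (the worker/counter names and the thread count are illustrative only, and it assumes the spinlock struct above is in scope):

```cpp
#include <mutex>  // std::unique_lock
#include <thread>
#include <vector>

spinlock mu;
long counter = 0;

void worker() {
    for (int i = 0; i < 100000; ++i) {
        std::unique_lock<spinlock> guard(mu); // same RAII usage as with std::mutex
        ++counter;
    }
}

int main() {
    std::vector<std::thread> threads;
    for (int i = 0; i < 4; ++i)
        threads.emplace_back(worker);
    for (auto &t : threads)
        t.join();
    return counter == 400000 ? 0 : 1; // exits 0 if no increments were lost
}
```

The acquire exchange in lock()/try_lock() pairs with the release store in unlock(), so writes made inside the critical section are visible to the next thread that acquires the lock.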

// Implements I/O methods for the "inner" main database file.
// KEY ASSUMPTIONS:
// 1. SQLite only calls the VFS xWrite() with one or more whole pages, and its first xWrite() to
@@ -271,7 +300,7 @@ class InnerDatabaseFile : public SQLiteVFS::File {
#endif
    }

    virtual void Execute(std::unique_lock<std::mutex> *seek_lock = nullptr) noexcept {
    virtual void Execute(std::unique_lock<spinlock> *seek_lock = nullptr) noexcept {
        assert(GetState() == State::WIP);
        assert(!seek_lock || seek_lock->owns_lock());
        try {
@@ -297,15 +326,15 @@
    const size_t MAX_FETCH_CURSORS = 4;
    std::vector<std::unique_ptr<FetchJob>> fetch_jobs_;
    ThreadPool fetch_thread_pool_;
    std::mutex seek_lock_; // serializes outer db interactions among fetch background threads
    spinlock seek_lock_;   // serializes outer db interactions among fetch background threads
    std::atomic<bool> seek_interrupt_; // broadcast that main thread wants seek_lock_

    unsigned long long read_opcount_ = 0, prefetch_wins_ = 0, prefetch_wasted_ = 0;
    sqlite3_int64 longest_read_ = 0;

    void *BackgroundFetchJob(void *ctx) noexcept {
        FetchJob *job = (FetchJob *)ctx;
        std::unique_lock<std::mutex> seek_lock(seek_lock_);
        std::unique_lock<spinlock> seek_lock(seek_lock_);
        while (seek_interrupt_.load(std::memory_order_relaxed)) {
            // yield to main thread
            seek_lock.unlock();
@@ -379,7 +408,7 @@ class InnerDatabaseFile : public SQLiteVFS::File {
        // dest. This elides a memcpy, often one from another processor die
        job->dest = dest;
        if (can_prefetch) {
            std::unique_lock<std::mutex> seek_lock(seek_lock_, std::defer_lock);
            std::unique_lock<spinlock> seek_lock(seek_lock_, std::defer_lock);
            if (!seek_lock.try_lock()) {
                seek_interrupt_.store(true, std::memory_order_relaxed);
                seek_lock.lock();
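This try_lock / seek_interrupt_ sequence pairs with the yield loop at the top of BackgroundFetchJob: a spinlock has no wait queue or fairness guarantee, so the main thread raises the flag to make background fetch threads back off instead of immediately re-grabbing the lock. A condensed standalone sketch of the protocol (the background_fetch/main_thread_op names are illustrative, it assumes the spinlock struct above, and the point where the flag is cleared is simplified here):

```cpp
#include <atomic>
#include <mutex>
#include <thread>

spinlock seek_lock_;
std::atomic<bool> seek_interrupt_{false};

void background_fetch() { // background fetch thread
    std::unique_lock<spinlock> seek_lock(seek_lock_);
    while (seek_interrupt_.load(std::memory_order_relaxed)) {
        seek_lock.unlock();       // yield to the main thread...
        std::this_thread::yield();
        seek_lock.lock();         // ...then re-acquire and re-check
    }
    // ... seek the outer database under seek_lock ...
}

void main_thread_op() { // main (foreground) thread
    std::unique_lock<spinlock> seek_lock(seek_lock_, std::defer_lock);
    if (!seek_lock.try_lock()) {
        // A fetch thread holds the lock: tell it to back off, then block.
        seek_interrupt_.store(true, std::memory_order_relaxed);
        seek_lock.lock();
    }
    // ... foreground work under seek_lock ...
    seek_interrupt_.store(false, std::memory_order_relaxed);
}
```

Relaxed ordering suffices for the flag because it is purely advisory; the acquire/release synchronization comes from the lock itself.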
@@ -398,11 +427,7 @@
                break;
            }
            while (job->GetState(std::memory_order_relaxed) != FetchJob::State::DONE) {
#ifdef __x86_64__
                __builtin_ia32_pause();
#else
                std::this_thread::yield();
#endif
            }
        }
#ifndef NDEBUG
@@ -541,7 +566,7 @@ class InnerDatabaseFile : public SQLiteVFS::File {
    void PrefetchBarrier() {
        if (fetch_thread_pool_.MaxThreads() > 1) {
            // Abort prefetch jobs that haven't started yet
            std::unique_lock<std::mutex> seek_lock(seek_lock_, std::defer_lock);
            std::unique_lock<spinlock> seek_lock(seek_lock_, std::defer_lock);
            if (!seek_lock.try_lock()) {
                seek_interrupt_.store(true, std::memory_order_relaxed);
                seek_lock.lock();
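Both this barrier path and the spin loops above ultimately busy-wait and benefit from a CPU relax hint. A portable helper could consolidate the preprocessor chain; this is a sketch assuming GCC/Clang-style builtins, and cpu_relax is an illustrative name, not part of this codebase:

```cpp
#include <thread>

// CPU relax hint for spin-wait loops.
inline void cpu_relax() noexcept {
#if defined(__x86_64__) || defined(__i386__)
    __builtin_ia32_pause();        // x86 PAUSE: throttles the spin, eases hyper-thread contention
#elif defined(__aarch64__) || defined(__arm__)
    __asm__ __volatile__("yield"); // ARM YIELD hint
#else
    std::this_thread::yield();     // fallback: give up the OS scheduler slot
#endif
}
```

Each `__builtin_ia32_pause()` / `std::this_thread::yield()` site in the diff could then call cpu_relax() instead.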