#include "kernel/spinlock.h" #include "kernel/cpu.h" #include "kernel/riscv.h" #include "kernel/atomic.h" #define NO_CPU 0xffff'ffff static bool current_cpu_holding(const Spinlock* lock) { return lock->locking == current_cpu()->id; } static void push_intr_off() { Cpu* cpu = current_cpu(); bool old_intr_enabled = (rc_sstatus(SSTATUS_SIE) & SSTATUS_SIE) != 0; if (cpu->intr_off_count == 0) { cpu->intr_enabled_before_off = old_intr_enabled; } cpu->intr_off_count += 1; } static void pop_intr_off() { Cpu* cpu = current_cpu(); if (cpu->intr_off_count == 0) { panic("pop_intr_off: count = 0"); } cpu->intr_off_count -= 1; if (cpu->intr_off_count == 0 && cpu->intr_enabled_before_off) { s_sstatus(SSTATUS_SIE); } } void spinlock_init(Spinlock* lock) { lock->next_ticket = 0; lock->serving = 0; lock->locking = NO_CPU; } void spinlock_acquire(Spinlock* lock) { if (current_cpu_holding(lock)) { panic("spinlock_acquire: already held"); } push_intr_off(); uint32_t ticket = atomic_fetch_add(&lock->next_ticket, 1, MEM_RELAXED); while (atomic_load(&lock->serving, MEM_RELAXED) != ticket) {} atomic_fence(MEM_ACQUIRE); lock->locking = current_cpu()->id; } void spinlock_release(Spinlock* lock) { if (!current_cpu_holding(lock)) { panic("spinlock_release: not held"); } lock->locking = NO_CPU; atomic_fetch_add(&lock->serving, 1, MEM_RELEASE); pop_intr_off(); }