path: root/kernel/spinlock.c
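/*
 * Ticket spinlock with interrupt-disable nesting.
 *
 * Acquisition takes a ticket with an atomic fetch-add and spins until the
 * "serving" counter reaches it, so waiters get the lock in FIFO order.
 * Interrupts stay disabled for as long as a lock is held;
 * push_intr_off()/pop_intr_off() keep a per-CPU nesting count so interrupts
 * are only re-enabled when the outermost critical section ends.
 */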
#include "kernel/spinlock.h"
#include "kernel/cpu.h"
#include "kernel/riscv.h"
#include "kernel/atomic.h"

// Owner value meaning "no CPU currently holds this lock".
#define NO_CPU 0xffff'ffff

// True if the CPU executing this call currently holds `lock`.
static bool current_cpu_holding(const Spinlock* lock)
{
    return lock->locking == current_cpu()->id;
}

// Disable interrupts on this CPU, with nesting. The outermost call records
// whether interrupts were enabled so a matching pop_intr_off() can restore
// that state once every nested critical section has ended.
static void push_intr_off(void)
{
    Cpu* cpu = current_cpu();

    // rc_sstatus(SSTATUS_SIE) reads sstatus and clears SIE in one operation,
    // so this disables interrupts and tells us whether they were enabled.
    bool old_intr_enabled = (rc_sstatus(SSTATUS_SIE) & SSTATUS_SIE) != 0;
    if (cpu->intr_off_count == 0)
    {
        cpu->intr_enabled_before_off = old_intr_enabled;
    }

    cpu->intr_off_count += 1;
}

// Undo one push_intr_off(). Interrupts are re-enabled only when the nesting
// count drops to zero and they were enabled before the outermost disable.
static void pop_intr_off(void)
{
    Cpu* cpu = current_cpu();

    if (cpu->intr_off_count == 0)
    {
        panic("pop_intr_off: count = 0");
    }

    cpu->intr_off_count -= 1;
    if (cpu->intr_off_count == 0 && cpu->intr_enabled_before_off)
    {
        s_sstatus(SSTATUS_SIE);
    }
}

// Initialise an unlocked, unowned spinlock.
void spinlock_init(Spinlock* lock)
{
    lock->next_ticket = 0;
    lock->serving = 0;
    lock->locking = NO_CPU;
}

// Acquire `lock`, spinning until it becomes available. Interrupts are
// disabled before taking a ticket and stay off until spinlock_release().
void spinlock_acquire(Spinlock* lock)
{
    // Disable interrupts first: this avoids deadlocking against an interrupt
    // handler that takes the same lock, and keeps current_cpu() stable for
    // the re-entrancy check below.
    push_intr_off();

    if (current_cpu_holding(lock))
    {
        panic("spinlock_acquire: already held");
    }

    // Take the next ticket and wait until it is the one being served.
    uint32_t ticket = atomic_fetch_add(&lock->next_ticket, 1, MEM_RELAXED);

    while (atomic_load(&lock->serving, MEM_RELAXED) != ticket) {}

    // Acquire fence: everything the previous holder wrote before releasing
    // is visible before our critical section starts.
    atomic_fence(MEM_ACQUIRE);

    lock->locking = current_cpu()->id;
}

// Release `lock`, handing it to the next waiting ticket holder.
void spinlock_release(Spinlock* lock)
{
    if (!current_cpu_holding(lock))
    {
        panic("spinlock_release: not held");
    }

    // Clear the owner before publishing the release; the release-ordered
    // increment of `serving` makes this critical section's writes visible
    // to the next holder and lets it proceed.
    lock->locking = NO_CPU;
    atomic_fetch_add(&lock->serving, 1, MEM_RELEASE);

    pop_intr_off();
}
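
/*
 * Usage sketch (illustrative only, not part of this file): a hypothetical
 * tick counter protected by a Spinlock. `ticks`, `ticks_lock`, `clock_init`
 * and `clock_tick` are made-up names; only the spinlock API above is real.
 * Because the interrupt-off state nests, taking another spinlock while one
 * is already held is safe, and interrupts are restored only when the
 * outermost lock is released.
 *
 *     static Spinlock ticks_lock;
 *     static uint64_t ticks;
 *
 *     void clock_init(void)
 *     {
 *         spinlock_init(&ticks_lock);
 *     }
 *
 *     void clock_tick(void)
 *     {
 *         spinlock_acquire(&ticks_lock);   // interrupts now off on this CPU
 *         ticks += 1;
 *         spinlock_release(&ticks_lock);   // interrupts restored if outermost
 *     }
 */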