author     Steven Le Rouzic <steven.lerouzic@gmail.com>    2024-06-02 00:26:57 +0200
committer  Steven Le Rouzic <steven.lerouzic@gmail.com>    2024-06-02 00:26:57 +0200
commit     45f420a338ea02225bb8a98c9aca5eed8d6a23ae (patch)
tree       97fdb1c8e81ba79b101b928158e5e37fa938c8f3 /kernel
Initial commit
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/aplic.c      2
-rw-r--r--  kernel/aplic.h      5
-rw-r--r--  kernel/atomic.h    48
-rw-r--r--  kernel/boot.s      19
-rw-r--r--  kernel/cpu.h       22
-rw-r--r--  kernel/kalloc.c    87
-rw-r--r--  kernel/kalloc.h     9
-rw-r--r--  kernel/lib.c       12
-rw-r--r--  kernel/lib.h       10
-rw-r--r--  kernel/linker.lds  57
-rw-r--r--  kernel/riscv.h     99
-rw-r--r--  kernel/spinlock.c  77
-rw-r--r--  kernel/spinlock.h  15
-rw-r--r--  kernel/start.c     51
-rw-r--r--  kernel/vm.c       108
-rw-r--r--  kernel/vm.h        13
16 files changed, 634 insertions, 0 deletions
diff --git a/kernel/aplic.c b/kernel/aplic.c
new file mode 100644
index 0000000..3b0a2f5
--- /dev/null
+++ b/kernel/aplic.c
@@ -0,0 +1,2 @@
+#include "kernel/aplic.h"
+#include "kernel/spinlock.h"
diff --git a/kernel/aplic.h b/kernel/aplic.h
new file mode 100644
index 0000000..a3b5ccb
--- /dev/null
+++ b/kernel/aplic.h
@@ -0,0 +1,5 @@
+#pragma once
+
+#include "kernel/lib.h"
+
+
diff --git a/kernel/atomic.h b/kernel/atomic.h
new file mode 100644
index 0000000..a5b892f
--- /dev/null
+++ b/kernel/atomic.h
@@ -0,0 +1,48 @@
+#pragma once
+
+#include "kernel/lib.h"
+
+enum MemoryOrdering
+{
+ MEM_RELAXED = 0,
+ MEM_ACQUIRE = 1,
+ MEM_RELEASE = 2,
+ MEM_ACQ_REL = 3,
+};
+
+static inline uint32_t atomic_load(volatile uint32_t* v, enum MemoryOrdering o)
+{
+ uint32_t v2;
+ switch (o)
+ {
+ case MEM_RELAXED: __asm__ volatile("lr.w %0, (%1)" : "=r"(v2) : "r"(v)); break;
+ case MEM_ACQUIRE: __asm__ volatile("lr.w.aq %0, (%1)" : "=r"(v2) : "r"(v)); break;
+ case MEM_RELEASE: __asm__ volatile("lr.w.rl %0, (%1)" : "=r"(v2) : "r"(v)); break;
+ case MEM_ACQ_REL: __asm__ volatile("lr.w.aqrl %0, (%1)" : "=r"(v2) : "r"(v)); break;
+ }
+ return v2;
+}
+
+static inline uint32_t atomic_fetch_add(volatile uint32_t* v, uint32_t a, enum MemoryOrdering o)
+{
+ uint32_t v2;
+ switch (o)
+ {
+ case MEM_RELAXED: __asm__ volatile("amoadd.w %0, %1, (%2)" : "=r"(v2) : "r"(a), "r"(v)); break;
+ case MEM_ACQUIRE: __asm__ volatile("amoadd.w.aq %0, %1, (%2)" : "=r"(v2) : "r"(a), "r"(v)); break;
+ case MEM_RELEASE: __asm__ volatile("amoadd.w.rl %0, %1, (%2)" : "=r"(v2) : "r"(a), "r"(v)); break;
+ case MEM_ACQ_REL: __asm__ volatile("amoadd.w.aqrl %0, %1, (%2)" : "=r"(v2) : "r"(a), "r"(v)); break;
+ }
+ return v2;
+}
+
+static inline void atomic_fence(enum MemoryOrdering o)
+{
+ switch (o)
+ {
+ case MEM_RELAXED: __asm__ volatile(""); break;
+ case MEM_ACQUIRE: __asm__ volatile("fence r,rw"); break;
+ case MEM_RELEASE: __asm__ volatile("fence rw,w"); break;
+ case MEM_ACQ_REL: __asm__ volatile("fence.tso"); break;
+ }
+}
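
The primitives above map one ordering enum onto RISC-V's aq/rl annotations; the "memory" clobbers keep the compiler from reordering surrounding accesses across the acquiring and releasing forms. A minimal usage sketch (illustrative only, not part of this commit; g_events is a hypothetical global):

// Sketch: a hart-shared event counter, assuming kernel/atomic.h above.
// Relaxed suffices here: only atomicity is needed, not ordering against
// neighbouring accesses.
static volatile uint32_t g_events;

static inline uint32_t record_event(void)
{
    return atomic_fetch_add(&g_events, 1, MEM_RELAXED); // returns old value
}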
diff --git a/kernel/boot.s b/kernel/boot.s
new file mode 100644
index 0000000..886cb6a
--- /dev/null
+++ b/kernel/boot.s
@@ -0,0 +1,19 @@
+.option norvc
+
+.section .text.init
+
+.global _start
+_start:
+
+.option push
+.option norelax
+ la gp, _global_pointer
+.option pop
+
+ la sp, _stack_top
+ la ra, 0f
+ call kinit
+
+0:
+ wfi
+ j 0b
diff --git a/kernel/cpu.h b/kernel/cpu.h
new file mode 100644
index 0000000..10acf76
--- /dev/null
+++ b/kernel/cpu.h
@@ -0,0 +1,22 @@
+#pragma once
+
+#include "kernel/lib.h"
+
+#define MAX_CPU 16
+
+struct Cpu
+{
+ uint32_t id;
+
+ uint32_t intr_off_count;
+ bool intr_enabled_before_off;
+};
+typedef struct Cpu Cpu;
+
+extern Cpu cpus[MAX_CPU];
+
+static inline Cpu* current_cpu()
+{
+ // @Todo current_cpu, better
+ return &cpus[0];
+}
diff --git a/kernel/kalloc.c b/kernel/kalloc.c
new file mode 100644
index 0000000..e197c0c
--- /dev/null
+++ b/kernel/kalloc.c
@@ -0,0 +1,87 @@
+#include "kernel/lib.h"
+#include "kernel/kalloc.h"
+#include "kernel/spinlock.h"
+
+extern uint32_t _heap_start;
+extern uint32_t _heap_end;
+
+union Page
+{
+ union Page* next;
+ char dummy[PAGE_SIZE];
+};
+typedef union Page Page;
+static_assert(sizeof(Page) == PAGE_SIZE);
+
+static inline void* align_page_down(void* p)
+{
+ return (void*)((uint32_t)p & ~(PAGE_SIZE - 1));
+}
+
+static inline void* align_page_up(void* p)
+{
+ return align_page_down((char*)p + PAGE_SIZE - 1);
+}
+
+static inline bool is_page_aligned(void* p)
+{
+ return ((uint32_t)p & (PAGE_SIZE - 1)) == 0;
+}
+
+static Page* g_page_start;
+static Page* g_page_end;
+static Page* g_free_list;
+static Spinlock g_lock;
+
+void kalloc_init()
+{
+ spinlock_init(&g_lock);
+
+ g_free_list = NULL;
+ g_page_start = (Page*)align_page_up(&_heap_start);
+ g_page_end = (Page*)align_page_down(&_heap_end);
+
+ for (Page* p = g_page_end - 1; p >= g_page_start; --p)
+ {
+ p->next = g_free_list;
+ g_free_list = p;
+ }
+}
+
+void* kalloc()
+{
+ spinlock_acquire(&g_lock);
+ Page* page = g_free_list;
+ if (!page)
+ {
+ panic("kalloc: Out of memory");
+ }
+ g_free_list = page->next;
+ spinlock_release(&g_lock);
+ return page;
+}
+
+void* kzalloc()
+{
+ void* page = kalloc();
+ uint32_t* p = (uint32_t*)page;
+ for (uint32_t i = 0; i < PAGE_SIZE / 4; ++i)
+ {
+ *p++ = 0U;
+ }
+ return page;
+}
+
+void kfree(void* ptr)
+{
+ if (ptr < (void*)g_page_start || ptr >= (void*)g_page_end || !is_page_aligned(ptr))
+ {
+ panic("kfree: Invalid page");
+ }
+
+ spinlock_acquire(&g_lock);
+ Page* page = (Page*)ptr;
+ page->next = g_free_list;
+ g_free_list = page;
+ spinlock_release(&g_lock);
+}
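
A usage sketch for the allocator (hypothetical caller, not part of this commit): kalloc hands out raw PAGE_SIZE-aligned pages from the free list, kzalloc additionally zeroes them, and kfree panics on pointers that are unaligned or outside the managed range.

// Sketch: borrow a zeroed page, use it as a word buffer, return it.
void page_demo(void)
{
    uint32_t* buf = (uint32_t*)kzalloc(); // panics if the free list is empty
    buf[0] = 0x1234'5678;
    kfree(buf);                           // must be page-aligned and in-range
}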
diff --git a/kernel/kalloc.h b/kernel/kalloc.h
new file mode 100644
index 0000000..2571dba
--- /dev/null
+++ b/kernel/kalloc.h
@@ -0,0 +1,9 @@
+#pragma once
+
+#define PAGE_SIZE 4096
+
+void kalloc_init();
+void* kalloc();
+void* kzalloc();
+void kfree(void*);
+
diff --git a/kernel/lib.c b/kernel/lib.c
new file mode 100644
index 0000000..fff4c15
--- /dev/null
+++ b/kernel/lib.c
@@ -0,0 +1,12 @@
+#include "kernel/lib.h"
+#include "kernel/riscv.h"
+
+__attribute__((noreturn)) void panic(const char* s)
+{
+ // @Todo Refactor UART
+
+ volatile char* kUartBase = (volatile char*)0x1000'0000;
+ while (*s) *kUartBase = *s++;
+
+ hart_halt();
+}
diff --git a/kernel/lib.h b/kernel/lib.h
new file mode 100644
index 0000000..c325dfd
--- /dev/null
+++ b/kernel/lib.h
@@ -0,0 +1,10 @@
+#pragma once
+
+#define NULL (0)
+
+typedef unsigned int uint32_t;
+typedef int int32_t;
+
+static_assert(sizeof(uint32_t) == 4);
+
+void panic(const char*);
diff --git a/kernel/linker.lds b/kernel/linker.lds
new file mode 100644
index 0000000..64ea771
--- /dev/null
+++ b/kernel/linker.lds
@@ -0,0 +1,57 @@
+MEMORY {
+ ram (wxa) : ORIGIN = 0x80000000, LENGTH = 128M
+}
+
+PHDRS {
+ text PT_LOAD;
+ rodata PT_LOAD;
+ data PT_LOAD;
+ bss PT_LOAD;
+}
+
+SECTIONS {
+ .text : ALIGN(4K) {
+ PROVIDE(_text_start = .);
+ *(.text.init);
+ *(.text .text.*);
+ . = ALIGN(4K);
+ PROVIDE(_text_end = .);
+ } >ram AT>ram :text
+
+ PROVIDE(_global_pointer = .);
+
+ .rodata : ALIGN(4K) {
+ PROVIDE(_rodata_start = .);
+ *(.rodata .rodata.*);
+ . = ALIGN(4K);
+ PROVIDE(_rodata_end = .);
+ } >ram AT>ram :rodata
+
+ .data : ALIGN(4K) {
+ PROVIDE(_data_start = .);
+ *(.data .data.*);
+ . = ALIGN(4K);
+ PROVIDE(_data_end = .);
+ } >ram AT>ram :data
+
+ .bss : ALIGN(4K) {
+ PROVIDE(_bss_start = .);
+ *(.sbss .sbss.*);
+ *(.bss .bss.*);
+ . = ALIGN(4K);
+ PROVIDE(_bss_end = .);
+ } >ram AT>ram :bss
+
+ . = ALIGN(4K);
+ PROVIDE(_stack_start = .);
+
+ PROVIDE(_stack_top = _stack_start + 0x100000);
+ PROVIDE(_stack_end = _stack_top);
+
+ PROVIDE(_ram_start = ORIGIN(ram));
+ PROVIDE(_ram_end = _ram_start + LENGTH(ram));
+
+ PROVIDE(_heap_start = _stack_end);
+ PROVIDE(_heap_size = ORIGIN(ram) + LENGTH(ram) - _heap_start);
+ PROVIDE(_heap_end = _heap_start + _heap_size);
+}
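
One subtlety with the PROVIDE symbols above: they are addresses, not objects with storage, so C code must declare them extern and take their address, never read their value. That is why kalloc.c and start.c use &_heap_start and &_bss_start. A minimal sketch (heap_bytes is illustrative):

// Sketch: recovering linker-assigned addresses from C.
extern uint32_t _heap_start; // value is meaningless; &_heap_start is the address
extern uint32_t _heap_end;

static uint32_t heap_bytes(void)
{
    return (uint32_t)&_heap_end - (uint32_t)&_heap_start;
}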
diff --git a/kernel/riscv.h b/kernel/riscv.h
new file mode 100644
index 0000000..64202cc
--- /dev/null
+++ b/kernel/riscv.h
@@ -0,0 +1,99 @@
+#pragma once
+
+#include "kernel/lib.h"
+
+__attribute__((noreturn)) static inline void hart_halt()
+{
+ while (true) { __asm__ volatile("wfi"); }
+}
+
+static inline void w_mideleg(uint32_t v)
+{
+ __asm__ volatile("csrw mideleg, %0" :: "r"(v));
+}
+
+static inline void w_medeleg(uint32_t v)
+{
+ __asm__ volatile("csrw medeleg, %0" :: "r"(v));
+}
+
+#define SSTATUS_SIE (1U << 1)
+#define SSTATUS_SPIE (1U << 5)
+#define SSTATUS_SPP_S (1U << 8)
+
+static inline void w_sstatus(uint32_t v)
+{
+ __asm__ volatile("csrw sstatus, %0" :: "r"(v));
+}
+
+static inline uint32_t r_sstatus()
+{
+ uint32_t v;
+ __asm__ volatile("csrr sstatus, %0" : "=r"(v));
+ return v;
+}
+
+static inline void s_sstatus(uint32_t v)
+{
+ __asm__ volatile("csrs sstatus, %0" :: "r"(v));
+}
+
+static inline uint32_t rc_sstatus(uint32_t v)
+{
+ uint32_t v2;
+ __asm__ volatile("csrrc %0, sstatus, %0" : "=r"(v2) : "r"(v));
+ return v2;
+}
+
+#define SIE_SSIE (1U << 1)
+#define SIE_STIE (1U << 5)
+#define SIE_SEIE (1U << 9)
+
+static inline void w_sie(uint32_t v)
+{
+ __asm__ volatile("csrw sie, %0" :: "r"(v));
+}
+
+static inline void w_sepc(void* addr)
+{
+ __asm__ volatile("csrw sepc, %0" :: "r"((uint32_t)addr));
+}
+
+static inline void w_stvec(void* addr)
+{
+ __asm__ volatile("csrw stvec, %0" :: "r"((uint32_t)addr));
+}
+
+#define PMPCFG_R 0x01
+#define PMPCFG_RW 0x03
+#define PMPCFG_X 0x04
+#define PMPCFG_OFF 0x00
+#define PMPCFG_TOR 0x08
+#define PMPCFG_NA4 0x10
+#define PMPCFG_NAPOT 0x18
+#define PMPCFG_LOCKED 0x80
+
+static inline void w_pmpcfg0(uint32_t v)
+{
+ __asm__ volatile("csrw pmpcfg0, %0" :: "r"(v));
+}
+
+#define PMPADDR_SHIFT 2
+
+static inline void w_pmpaddr0(uint32_t addr)
+{
+ __asm__ volatile("csrw pmpaddr0, %0" :: "r"(addr));
+}
+
+static inline void sfence_vma()
+{
+ __asm__ volatile("sfence.vma zero, zero");
+}
+
+#define SATP_MODE_BARE 0
+#define SATP_MODE_SV32 0x8000'0000
+
+static inline void w_satp(uint32_t v)
+{
+ __asm__ volatile("csrw satp, %0" :: "r"(v));
+}
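
A sketch of how the sstatus helpers compose (illustrative wrapper, not part of this commit): rc_sstatus atomically clears the given bits and returns the previous value, which lets callers save and restore the interrupt-enable state, exactly as spinlock.c does.

// Sketch: run fn with S-mode interrupts masked, restoring the prior state.
static void with_intr_off(void (*fn)(void))
{
    uint32_t old = rc_sstatus(SSTATUS_SIE);        // clear SIE, get old sstatus
    fn();
    if (old & SSTATUS_SIE) s_sstatus(SSTATUS_SIE); // re-enable only if it was on
}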
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
new file mode 100644
index 0000000..8f9c654
--- /dev/null
+++ b/kernel/spinlock.c
@@ -0,0 +1,77 @@
+#include "kernel/spinlock.h"
+#include "kernel/cpu.h"
+#include "kernel/riscv.h"
+#include "kernel/atomic.h"
+
+#define NO_CPU 0xffff'ffff
+
+static bool current_cpu_holding(const Spinlock* lock)
+{
+ return lock->locking == current_cpu()->id;
+}
+
+static void push_intr_off()
+{
+ Cpu* cpu = current_cpu();
+
+ bool old_intr_enabled = (rc_sstatus(SSTATUS_SIE) & SSTATUS_SIE) != 0;
+ if (cpu->intr_off_count == 0)
+ {
+ cpu->intr_enabled_before_off = old_intr_enabled;
+ }
+
+ cpu->intr_off_count += 1;
+}
+
+static void pop_intr_off()
+{
+ Cpu* cpu = current_cpu();
+
+ if (cpu->intr_off_count == 0)
+ {
+ panic("pop_intr_off: count = 0");
+ }
+
+ cpu->intr_off_count -= 1;
+ if (cpu->intr_off_count == 0 && cpu->intr_enabled_before_off)
+ {
+ s_sstatus(SSTATUS_SIE);
+ }
+}
+
+void spinlock_init(Spinlock* lock)
+{
+ lock->next_ticket = 0;
+ lock->serving = 0;
+ lock->locking = NO_CPU;
+}
+
+void spinlock_acquire(Spinlock* lock)
+{
+ if (current_cpu_holding(lock))
+ {
+ panic("spinlock_acquire: already held");
+ }
+
+ push_intr_off();
+
+ uint32_t ticket = atomic_fetch_add(&lock->next_ticket, 1, MEM_RELAXED);
+
+ while (atomic_load(&lock->serving, MEM_RELAXED) != ticket) {}
+ atomic_fence(MEM_ACQUIRE);
+
+ lock->locking = current_cpu()->id;
+}
+
+void spinlock_release(Spinlock* lock)
+{
+ if (!current_cpu_holding(lock))
+ {
+ panic("spinlock_release: not held");
+ }
+
+ lock->locking = NO_CPU;
+ atomic_fetch_add(&lock->serving, 1, MEM_RELEASE);
+
+ pop_intr_off();
+}
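
As a usage sketch (g_id_lock and g_next_id are hypothetical), the ticket discipline serves acquirers in FIFO order across harts, and acquire/release also masks interrupts on the local CPU so the lock cannot deadlock against its own interrupt handlers:

// Sketch: a lock-protected ID generator, after spinlock_init(&g_id_lock).
static Spinlock g_id_lock;
static uint32_t g_next_id;

uint32_t id_alloc(void)
{
    spinlock_acquire(&g_id_lock); // spins on its ticket; interrupts now off
    uint32_t id = g_next_id++;
    spinlock_release(&g_id_lock); // bumps serving; interrupt state restored
    return id;
}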
diff --git a/kernel/spinlock.h b/kernel/spinlock.h
new file mode 100644
index 0000000..5e9ac40
--- /dev/null
+++ b/kernel/spinlock.h
@@ -0,0 +1,15 @@
+#pragma once
+
+#include "kernel/lib.h"
+
+struct Spinlock
+{
+ volatile uint32_t next_ticket;
+ volatile uint32_t serving;
+ uint32_t locking;
+};
+typedef struct Spinlock Spinlock;
+
+void spinlock_init(Spinlock* lock);
+void spinlock_acquire(Spinlock* lock);
+void spinlock_release(Spinlock* lock);
diff --git a/kernel/start.c b/kernel/start.c
new file mode 100644
index 0000000..b7170ae
--- /dev/null
+++ b/kernel/start.c
@@ -0,0 +1,51 @@
+#include "kernel/lib.h"
+#include "kernel/riscv.h"
+#include "kernel/kalloc.h"
+#include "kernel/vm.h"
+#include "kernel/cpu.h"
+
+Cpu cpus[MAX_CPU];
+
+extern uint32_t _bss_start;
+extern uint32_t _bss_end;
+extern uint32_t _ram_end;
+
+void kstrap()
+{
+ panic("kstrap");
+}
+
+void kstart()
+{
+ kalloc_init();
+ kvm_init();
+
+ panic("kstart: end");
+}
+
+void kinit()
+{
+ // @Todo Initialize CPUs
+ Cpu null_cpu = {0};
+ cpus[0] = null_cpu;
+
+ // Clear the BSS section
+ for (uint32_t* ptr = &_bss_start; ptr < &_bss_end; ++ptr)
+ {
+ *ptr = 0U;
+ }
+
+ w_mideleg(0xFFFF);
+ w_medeleg(0xFFFF);
+
+ w_sstatus(SSTATUS_SPIE | SSTATUS_SPP_S);
+ w_sie(SIE_SEIE);
+ w_sepc(&kstart);
+ w_stvec(&kstrap);
+
+ w_pmpcfg0(PMPCFG_RW | PMPCFG_X | PMPCFG_TOR);
+ w_pmpaddr0(((uint32_t)&_ram_end) >> PMPADDR_SHIFT);
+
+ __asm__ volatile("sret");
+}
+
diff --git a/kernel/vm.c b/kernel/vm.c
new file mode 100644
index 0000000..c1adbe7
--- /dev/null
+++ b/kernel/vm.c
@@ -0,0 +1,108 @@
+#include "kernel/vm.h"
+#include "kernel/kalloc.h"
+#include "kernel/lib.h"
+#include "kernel/riscv.h"
+
+enum
+{
+ VM_VALID = 1,
+ VM_USER = 16,
+ VM_GLOBAL = 32,
+ VM_ACCESSED = 64,
+ VM_DIRTY = 128,
+};
+
+struct PageTable
+{
+ uint32_t entries[1024];
+};
+typedef struct PageTable PageTable;
+static_assert(sizeof(PageTable) == PAGE_SIZE);
+
+static PageTable* kroot = nullptr;
+
+static inline bool is_aligned(uint32_t p)
+{
+ return (p & (PAGE_SIZE - 1)) == 0;
+}
+
+static inline void* offset_page(void* p)
+{
+ return (void*)((char*)p + PAGE_SIZE);
+}
+
+void kvm_map_one(PageTable* root, void* va, void* pa, uint32_t mode)
+{
+ uint32_t vpn[] = {
+ ((uint32_t)va >> 12) & 0x3ff,
+ ((uint32_t)va >> 22) & 0x3ff,
+ };
+
+ uint32_t fl = root->entries[vpn[1]];
+ if (!(fl & VM_VALID))
+ {
+ PageTable* child = (PageTable*)kzalloc();
+ fl = (((uint32_t)child >> 2) & 0xffff'fc00) | VM_VALID;
+ root->entries[vpn[1]] = fl;
+ }
+
+ PageTable* child = (PageTable*)((fl & 0xffff'fc00) << 2);
+
+ uint32_t ppn[] = {
+ ((uint32_t)pa >> 12) & 0x3ff,
+ ((uint32_t)pa >> 22) & 0x3ff,
+ };
+
+ child->entries[vpn[0]] = (ppn[1] << 20) | (ppn[0] << 10) | VM_VALID | mode;
+}
+
+void kvm_map(void* va, void* pa, uint32_t size, uint32_t mode)
+{
+ if (!is_aligned((uint32_t)va)) panic("kvm_map: virtual address not aligned");
+ if (!is_aligned((uint32_t)pa)) panic("kvm_map: physical address not aligned");
+ if (!is_aligned(size)) panic("kvm_map: size not aligned");
+
+ for (; size > 0; size -= PAGE_SIZE)
+ {
+ kvm_map_one(kroot, va, pa, mode);
+ va = offset_page(va);
+ pa = offset_page(pa);
+ }
+}
+
+extern char _text_start;
+extern char _text_end;
+
+extern char _rodata_start;
+extern char _rodata_end;
+
+extern char _data_start;
+extern char _data_end;
+
+extern char _bss_start;
+extern char _bss_end;
+
+extern char _stack_start;
+extern char _stack_end;
+
+extern char _heap_start;
+extern char _heap_end;
+
+void kvm_init()
+{
+ kroot = (PageTable*)kzalloc();
+
+ kvm_map(&_text_start, &_text_start, &_text_end - &_text_start, VM_R | VM_X);
+ kvm_map(&_rodata_start, &_rodata_start, &_rodata_end - &_rodata_start, VM_R);
+ kvm_map(&_data_start, &_data_start, &_data_end - &_data_start, VM_RW);
+ kvm_map(&_bss_start, &_bss_start, &_bss_end - &_bss_start, VM_RW);
+ kvm_map(&_heap_start, &_heap_start, &_heap_end - &_heap_start, VM_RW);
+ kvm_map(&_stack_start, &_stack_start, &_stack_end - &_stack_start, VM_RW);
+
+ kvm_map((void*)0x1000'0000, (void*)0x1000'0000, PAGE_SIZE, VM_RW); // UART
+ kvm_map((void*)0x0c00'0000, (void*)0x0c00'0000, 0x8000, VM_RW); // APLIC-M
+ kvm_map((void*)0x0d00'0000, (void*)0x0d00'0000, 0x8000, VM_RW); // APLIC-S
+
+ w_satp(SATP_MODE_SV32 | ((uint32_t)kroot >> 12));
+ sfence_vma();
+}
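
To make the shifting in kvm_map_one concrete, here is the Sv32 decomposition worked through for one example address (illustration only):

// Sketch: Sv32 splits a 32-bit VA into VPN[1] (10 bits), VPN[0] (10 bits)
// and a 12-bit page offset. For va = 0x1000'2345:
//   vpn[1] = (va >> 22) & 0x3ff = 0x040  -> index into the root table
//   vpn[0] = (va >> 12) & 0x3ff = 0x002  -> index into the child table
//   offset =  va        & 0xfff = 0x345  -> byte within the 4 KiB page
// A leaf PTE places PPN[1] at bits 31:20 and PPN[0] at bits 19:10, hence
// the (ppn[1] << 20) | (ppn[0] << 10) in kvm_map_one; satp instead takes
// the root table's physical page number, hence (uint32_t)kroot >> 12.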
diff --git a/kernel/vm.h b/kernel/vm.h
new file mode 100644
index 0000000..7b2e02e
--- /dev/null
+++ b/kernel/vm.h
@@ -0,0 +1,13 @@
+#pragma once
+
+#include "kernel/lib.h"
+
+enum
+{
+ VM_R = 2,
+ VM_RW = 6,
+ VM_X = 8,
+};
+
+void kvm_init();
+void kvm_map(void* va, void* pa, uint32_t size, uint32_t mode);