Initial commit

This commit is contained in:
2024-06-02 00:26:57 +02:00
commit 45f420a338
21 changed files with 889 additions and 0 deletions

2
kernel/aplic.c Normal file
View File

@ -0,0 +1,2 @@
#include "kernel/aplic.h"
#include "kernel/spinlock.h"

5
kernel/aplic.h Normal file
View File

@ -0,0 +1,5 @@
#pragma once
#include "kernel/lib.h"

48
kernel/atomic.h Normal file
View File

@ -0,0 +1,48 @@
#pragma once
#include "kernel/lib.h"
// C11-style memory orderings for the hand-rolled RISC-V atomics below.
// The values map onto the .aq / .rl annotation bits of LR/AMO instructions.
enum MemoryOrdering
{
    MEM_RELAXED = 0, // no ordering constraint
    MEM_ACQUIRE = 1, // later accesses may not move before the operation
    MEM_RELEASE = 2, // earlier accesses may not move after the operation
    MEM_ACQ_REL = 3, // both of the above
};
// Atomically load *v with the requested ordering and return the value.
//
// Implemented with LR (load-reserved) so the .aq/.rl annotations are
// available; the reservation is simply never consumed by a matching SC.
// Fix: a "memory" clobber is added to every asm — without it the compiler
// is free to reorder or cache surrounding memory accesses across the
// atomic, defeating the hardware ordering the annotation requests.
// NOTE(review): the ISA manual says LR with only .rl set provides no
// stronger ordering than a plain LR — confirm MEM_RELEASE loads are meant.
static inline uint32_t atomic_load(volatile uint32_t* v, enum MemoryOrdering o)
{
    uint32_t v2;
    switch (o)
    {
        case MEM_RELAXED: __asm__ volatile("lr.w %0, (%1)" : "=r"(v2) : "r"(v) : "memory"); break;
        case MEM_ACQUIRE: __asm__ volatile("lr.w.aq %0, (%1)" : "=r"(v2) : "r"(v) : "memory"); break;
        case MEM_RELEASE: __asm__ volatile("lr.w.rl %0, (%1)" : "=r"(v2) : "r"(v) : "memory"); break;
        case MEM_ACQ_REL: __asm__ volatile("lr.w.aqrl %0, (%1)" : "=r"(v2) : "r"(v) : "memory"); break;
    }
    return v2;
}
// Atomically add `a` to *v and return the PREVIOUS value (amoadd.w).
//
// Fix: a "memory" clobber is added to every asm — without it the compiler
// may hoist or sink other loads/stores across the AMO, so e.g. a
// MEM_RELEASE increment would not actually publish earlier writes at the
// language level even though the hardware .rl bit is set.
static inline uint32_t atomic_fetch_add(volatile uint32_t* v, uint32_t a, enum MemoryOrdering o)
{
    uint32_t v2;
    switch (o)
    {
        case MEM_RELAXED: __asm__ volatile("amoadd.w %0, %1, (%2)" : "=r"(v2) : "r"(a), "r"(v) : "memory"); break;
        case MEM_ACQUIRE: __asm__ volatile("amoadd.w.aq %0, %1, (%2)" : "=r"(v2) : "r"(a), "r"(v) : "memory"); break;
        case MEM_RELEASE: __asm__ volatile("amoadd.w.rl %0, %1, (%2)" : "=r"(v2) : "r"(a), "r"(v) : "memory"); break;
        case MEM_ACQ_REL: __asm__ volatile("amoadd.w.aqrl %0, %1, (%2)" : "=r"(v2) : "r"(a), "r"(v) : "memory"); break;
    }
    return v2;
}
// Stand-alone memory fence with the given ordering.
//
// Fix: "memory" clobbers added. A fence *instruction* orders the hardware,
// but without the clobber the compiler may still move accesses across the
// asm, so the fence was not a compiler barrier at all. MEM_RELAXED stays a
// pure compiler barrier (no instruction), which is now explicit.
// fence.tso orders r->rw and rw->w, i.e. exactly acquire + release
// combined, which is why it serves MEM_ACQ_REL.
static inline void atomic_fence(enum MemoryOrdering o)
{
    switch (o)
    {
        case MEM_RELAXED: __asm__ volatile("" ::: "memory"); break;
        case MEM_ACQUIRE: __asm__ volatile("fence r,rw" ::: "memory"); break;
        case MEM_RELEASE: __asm__ volatile("fence rw,w" ::: "memory"); break;
        case MEM_ACQ_REL: __asm__ volatile("fence.tso" ::: "memory"); break;
    }
}

19
kernel/boot.s Normal file
View File

@ -0,0 +1,19 @@
# Machine-mode entry point: the very first instructions executed after
# reset / firmware hand-off.
.option norvc
.section .text.init
.global _start
_start:
.option push
.option norelax
# gp must be loaded with linker relaxation disabled, otherwise the la
# could itself be relaxed into a gp-relative access before gp is valid.
la gp, _global_pointer
.option pop
# Stack grows down from _stack_top (provided by the linker script).
la sp, _stack_top
# NOTE(review): ra is immediately clobbered by `call` below, and kinit
# ends in sret rather than returning — this la looks redundant; confirm.
la ra, 0f
call kinit
# Parking loop in case control ever comes back here.
0:
wfi
j 0b

22
kernel/cpu.h Normal file
View File

@ -0,0 +1,22 @@
#pragma once
#include "kernel/lib.h"
// Upper bound on supported harts.
#define MAX_CPU 16
// Per-CPU bookkeeping used by the spinlock interrupt push/pop machinery
// (see spinlock.c).
struct Cpu
{
    uint32_t id;                  // hart id, compared against Spinlock.locking
    uint32_t intr_off_count;      // nesting depth of push_intr_off
    bool intr_enabled_before_off; // SIE state captured at the outermost push
};
typedef struct Cpu Cpu;
extern Cpu cpus[MAX_CPU]; // defined in start.c
// Return the calling hart's Cpu slot.
// @Todo current_cpu, better — currently hard-wired to hart 0.
static inline Cpu* current_cpu()
{
    return &cpus[0];
}

87
kernel/kalloc.c Normal file
View File

@ -0,0 +1,87 @@
#include "kernel/lib.h"
#include "kernel/kalloc.h"
#include "kernel/spinlock.h"
// Heap bounds provided by the linker script (addresses, not values).
extern uint32_t _heap_start;
extern uint32_t _heap_end;
// A free page doubles as its own free-list node; the char array pads the
// union to exactly one page.
union Page
{
    union Page* next; // next free page; only meaningful while on the free list
    char dummy[PAGE_SIZE];
};
typedef union Page Page;
static_assert(sizeof(Page) == PAGE_SIZE);
// Round p down to the start of its 4 KiB page.
static inline void* align_page_down(void* p)
{
    uint32_t addr = (uint32_t)p;
    addr -= addr % PAGE_SIZE;
    return (void*)addr;
}
// Round p up to the next page boundary (identity when already aligned).
static inline void* align_page_up(void* p)
{
    return align_page_down((char*)p + (PAGE_SIZE - 1));
}
// True when p sits exactly on a page boundary.
static inline bool is_page_aligned(void* p)
{
    return (uint32_t)p % PAGE_SIZE == 0;
}
// Managed page range [g_page_start, g_page_end) and the free-list head.
// g_free_list is protected by g_lock; the range bounds are set once in
// kalloc_init and read-only afterwards.
static Page* g_page_start;
static Page* g_page_end;
static Page* g_free_list;
static Spinlock g_lock;
// Build the allocator's free list from the linker-provided heap range.
// Pages are pushed starting from the highest address and walking down, so
// the resulting list hands out the lowest addresses first — identical to
// the original ordering.
void kalloc_init()
{
    spinlock_init(&g_lock);
    g_free_list = NULL;
    g_page_start = (Page*)align_page_up(&_heap_start);
    g_page_end = (Page*)align_page_down(&_heap_end);
    Page* p = g_page_end;
    while (p != g_page_start)
    {
        --p;
        p->next = g_free_list;
        g_free_list = p;
    }
}
void* kalloc()
{
spinlock_acquire(&g_lock);
Page* page = g_free_list;
if (!page)
{
panic("kalloc: Out of memory");
}
g_free_list = page->next;
spinlock_release(&g_lock);
return page;
}
// Allocate one page and zero-fill it, one 32-bit word at a time.
void* kzalloc()
{
    void* page = kalloc();
    uint32_t* words = (uint32_t*)page;
    for (uint32_t i = 0; i < PAGE_SIZE / 4; ++i)
    {
        words[i] = 0U;
    }
    return page;
}
// Return a page to the free list. The pointer must be one previously
// handed out by kalloc: page-aligned and inside the managed range.
void kfree(void* ptr)
{
    bool in_range = ptr >= (void*)g_page_start && ptr < (void*)g_page_end;
    if (!in_range || !is_page_aligned(ptr))
    {
        panic("kfree: Invalid page");
    }
    spinlock_acquire(&g_lock);
    Page* page = (Page*)ptr;
    page->next = g_free_list;
    g_free_list = page;
    spinlock_release(&g_lock);
}

9
kernel/kalloc.h Normal file
View File

@ -0,0 +1,9 @@
#pragma once
// Physical page allocator (kalloc.c): fixed 4 KiB pages, free-list based.
#define PAGE_SIZE 4096
// Must be called once before any allocation.
void kalloc_init();
// Returns an uninitialized page; panics on out-of-memory (never NULL).
void* kalloc();
// Like kalloc, but the page is zero-filled.
void* kzalloc();
// Returns a kalloc'd page to the pool; panics on a bad pointer.
void kfree(void*);

12
kernel/lib.c Normal file
View File

@ -0,0 +1,12 @@
#include "kernel/lib.h"
#include "kernel/riscv.h"
__attribute__((noreturn)) void panic(const char* s)
{
// @Todo Refactor UART
volatile char* kUartBase = (volatile char*)0x1000'0000;
while (*s) *kUartBase = *s++;
hart_halt();
}

10
kernel/lib.h Normal file
View File

@ -0,0 +1,10 @@
#pragma once
// Freestanding stand-ins for the few libc pieces the kernel needs.
// NOTE(review): NULL as plain (0) is an int, not a pointer constant —
// ((void*)0) would be safer; vm.c already uses C23 nullptr. Confirm intent.
#define NULL (0)
typedef unsigned int uint32_t;
typedef int int32_t;
static_assert(sizeof(uint32_t) == 4);
// Writes the message to the UART and halts the hart; never returns (lib.c).
void panic(const char*);

57
kernel/linker.lds Normal file
View File

@ -0,0 +1,57 @@
/* QEMU-virt-style layout: 128 MiB of RAM based at 0x8000_0000. */
MEMORY {
    ram (wxa) : ORIGIN = 0x80000000, LENGTH = 128M
}
/* One loadable segment per section so permissions can differ per segment. */
PHDRS {
    text PT_LOAD;
    rodata PT_LOAD;
    data PT_LOAD;
    bss PT_LOAD;
}
SECTIONS {
    /* Every section is 4 KiB-aligned at both ends: vm.c identity-maps each
       one page-by-page using the _*_start/_*_end symbols below. */
    .text : ALIGN(4K) {
        PROVIDE(_text_start = .);
        *(.text.init);          /* boot.s entry code goes first */
        *(.text .text.*);
        . = ALIGN(4K);
        PROVIDE(_text_end = .);
    } >ram AT>ram :text
    /* NOTE(review): gp conventionally points into the small-data area
       (+/- 0x800) to enable gp-relative relaxation; anchoring it at the end
       of .text may defeat that — confirm this placement is intended. */
    PROVIDE(_global_pointer = .);
    .rodata : ALIGN(4K) {
        PROVIDE(_rodata_start = .);
        *(.rodata .rodata.*);
        . = ALIGN(4K);
        PROVIDE(_rodata_end = .);
    } >ram AT>ram :rodata
    .data : ALIGN(4K) {
        PROVIDE(_data_start = .);
        *(.data .data.*);
        . = ALIGN(4K);
        PROVIDE(_data_end = .);
    } >ram AT>ram :data
    .bss : ALIGN(4K) {
        PROVIDE(_bss_start = .);
        *(.sbss .sbss.*);
        *(.bss .bss.*);
        . = ALIGN(4K);
        PROVIDE(_bss_end = .);
    } >ram AT>ram :bss
    /* 1 MiB boot stack immediately after .bss; sp starts at _stack_top. */
    . = ALIGN(4K);
    PROVIDE(_stack_start = .);
    PROVIDE(_stack_top = _stack_start + 0x100000);
    PROVIDE(_stack_end = _stack_top);
    PROVIDE(_ram_start = ORIGIN(ram));
    PROVIDE(_ram_end = _ram_start + LENGTH(ram));
    /* Everything from the stack's end to the end of RAM feeds kalloc. */
    PROVIDE(_heap_start = _stack_end);
    PROVIDE(_heap_size = ORIGIN(ram) + LENGTH(ram) - _heap_start);
    PROVIDE(_heap_end = _heap_start + _heap_size);
}

99
kernel/riscv.h Normal file
View File

@ -0,0 +1,99 @@
#pragma once
#include "kernel/lib.h"
// Park this hart forever. wfi can return (e.g. when an interrupt becomes
// pending), hence the surrounding infinite loop.
__attribute__((noreturn)) static inline void hart_halt()
{
    for (;;)
    {
        __asm__ volatile("wfi");
    }
}
// Delegate machine-level interrupts to S-mode (one bit per cause).
static inline void w_mideleg(uint32_t v)
{
    __asm__ volatile("csrw mideleg, %0" :: "r"(v));
}
// Delegate exceptions to S-mode (one bit per cause).
static inline void w_medeleg(uint32_t v)
{
    __asm__ volatile("csrw medeleg, %0" :: "r"(v));
}
#define SSTATUS_SIE (1U << 1)   // S-mode interrupt enable
#define SSTATUS_SPIE (1U << 5)  // SIE value restored by sret
#define SSTATUS_SPP_S (1U << 8) // "previous privilege" = S-mode
// Overwrite the whole sstatus CSR.
static inline void w_sstatus(uint32_t v)
{
    __asm__ volatile("csrw sstatus, %0" :: "r"(v));
}
// Read and return the sstatus CSR.
// Fix: csrr takes the destination register FIRST ("csrr rd, csr"); the
// original had the operands swapped ("csrr sstatus, %0"), which is not a
// valid instruction and never assembles.
static inline uint32_t r_sstatus()
{
    uint32_t v;
    __asm__ volatile("csrr %0, sstatus" : "=r"(v));
    return v;
}
// Set (OR in) the given bits of sstatus via csrs; used to re-enable SIE.
static inline void s_sstatus(uint32_t v)
{
    __asm__ volatile("csrs sstatus, %0" :: "r"(v));
}
// Atomically clear the bits in `v` from sstatus and return the PREVIOUS
// sstatus value (csrrc rd, csr, rs1).
// Fix: the clear mask must come from input operand %1. The original wrote
// "csrrc %0, sstatus, %0", so the mask was whatever garbage sat in the
// uninitialized output register and `v` was never used — this silently
// broke push_intr_off's interrupt disabling.
// A "memory" clobber keeps memory accesses from drifting across the
// interrupt-state change.
static inline uint32_t rc_sstatus(uint32_t v)
{
    uint32_t v2;
    __asm__ volatile("csrrc %0, sstatus, %1" : "=r"(v2) : "r"(v) : "memory");
    return v2;
}
#define SIE_SSIE (1U << 1) // S-mode software interrupts
#define SIE_STIE (1U << 5) // S-mode timer interrupts
#define SIE_SEIE (1U << 9) // S-mode external interrupts
// Write the S-mode interrupt-enable CSR.
static inline void w_sie(uint32_t v)
{
    __asm__ volatile("csrw sie, %0" :: "r"(v));
}
// Set the S-mode exception PC: the address sret will jump to.
static inline void w_sepc(void* addr)
{
    __asm__ volatile("csrw sepc, %0" :: "r"((uint32_t)addr));
}
// Install the S-mode trap vector. The low stvec bits select the mode;
// a 4-byte-aligned function address yields direct mode.
static inline void w_stvec(void* addr)
{
    __asm__ volatile("csrw stvec, %0" :: "r"((uint32_t)addr));
}
// pmpcfg permission / address-matching bits (one config byte per region).
#define PMPCFG_R 0x01
#define PMPCFG_RW 0x03
#define PMPCFG_X 0x04
#define PMPCFG_OFF 0x00
#define PMPCFG_TOR 0x08 // top-of-range: region is [prev pmpaddr, this pmpaddr)
#define PMPCFG_NA4 0x10
#define PMPCFG_NAPOT 0x18
#define PMPCFG_LOCKED 0x80
// Write the first PMP configuration register (regions 0-3 on RV32).
static inline void w_pmpcfg0(uint32_t v)
{
    __asm__ volatile("csrw pmpcfg0, %0" :: "r"(v));
}
// pmpaddr registers hold physical-address bits [33:2], hence the shift.
#define PMPADDR_SHIFT 2
static inline void w_pmpaddr0(uint32_t addr)
{
    __asm__ volatile("csrw pmpaddr0, %0" :: "r"(addr));
}
// Flush the whole TLB: all address spaces, all virtual addresses.
static inline void sfence_vma()
{
    __asm__ volatile("sfence.vma zero, zero");
}
#define SATP_MODE_BARE 0
#define SATP_MODE_SV32 0x8000'0000 // MODE field (bit 31) = Sv32
// Write satp: translation mode plus root-table physical page number.
static inline void w_satp(uint32_t v)
{
    __asm__ volatile("csrw satp, %0" :: "r"(v));
}

77
kernel/spinlock.c Normal file
View File

@ -0,0 +1,77 @@
#include "kernel/spinlock.h"
#include "kernel/cpu.h"
#include "kernel/riscv.h"
#include "kernel/atomic.h"
// Sentinel stored in Spinlock.locking while the lock is free.
#define NO_CPU 0xffff'ffff
// True when the calling CPU currently owns `lock`.
static bool current_cpu_holding(const Spinlock* lock)
{
    return lock->locking == current_cpu()->id;
}
// Disable interrupts with nesting support: only the OUTERMOST call records
// whether interrupts were enabled, so a matching pop_intr_off can restore
// exactly that state.
static void push_intr_off()
{
    Cpu* cpu = current_cpu();
    // rc_sstatus atomically clears SIE and returns the old sstatus.
    bool old_intr_enabled = (rc_sstatus(SSTATUS_SIE) & SSTATUS_SIE) != 0;
    if (cpu->intr_off_count == 0)
    {
        cpu->intr_enabled_before_off = old_intr_enabled;
    }
    cpu->intr_off_count += 1;
}
// Undo one push_intr_off; re-enables interrupts only when the outermost
// level is reached AND they were enabled before the first push.
static void pop_intr_off()
{
    Cpu* cpu = current_cpu();
    if (cpu->intr_off_count == 0)
    {
        panic("pop_intr_off: count = 0");
    }
    cpu->intr_off_count -= 1;
    if (cpu->intr_off_count == 0 && cpu->intr_enabled_before_off)
    {
        s_sstatus(SSTATUS_SIE);
    }
}
// Reset a ticket lock to the released state: no owner, no tickets issued.
void spinlock_init(Spinlock* lock)
{
    lock->next_ticket = 0;
    lock->serving = 0;
    lock->locking = NO_CPU; // nobody holds it
}
// Take the lock. This is a FIFO ticket lock: each acquirer draws a ticket
// and spins until `serving` reaches it. Interrupts stay disabled for the
// whole time the lock is held (push_intr_off / pop_intr_off).
void spinlock_acquire(Spinlock* lock)
{
    if (current_cpu_holding(lock))
    {
        panic("spinlock_acquire: already held");
    }
    push_intr_off();
    // Draw a ticket; the spin itself needs no ordering...
    uint32_t ticket = atomic_fetch_add(&lock->next_ticket, 1, MEM_RELAXED);
    while (atomic_load(&lock->serving, MEM_RELAXED) != ticket) {}
    // ...the acquire fence keeps critical-section accesses from floating
    // above the successful spin.
    atomic_fence(MEM_ACQUIRE);
    lock->locking = current_cpu()->id;
}
// Release the lock and hand the turn to the next ticket holder.
void spinlock_release(Spinlock* lock)
{
    if (!current_cpu_holding(lock))
    {
        panic("spinlock_release: not held");
    }
    lock->locking = NO_CPU;
    // Release ordering: the serving++ publishes every write made inside
    // the critical section before the next holder can observe the turn.
    atomic_fetch_add(&lock->serving, 1, MEM_RELEASE);
    pop_intr_off();
}

15
kernel/spinlock.h Normal file
View File

@ -0,0 +1,15 @@
#pragma once
#include "kernel/lib.h"
// FIFO ticket spinlock. The counter fields are touched concurrently by
// multiple harts (via the atomics in spinlock.c), hence volatile.
struct Spinlock
{
    volatile uint32_t next_ticket; // ticket handed to the next acquirer
    volatile uint32_t serving;     // ticket currently allowed in
    uint32_t locking;              // id of the holding CPU (sentinel when free)
};
typedef struct Spinlock Spinlock;
void spinlock_init(Spinlock* lock);
// Disables interrupts while held; panics on recursive acquire.
void spinlock_acquire(Spinlock* lock);
// Panics if the caller does not hold the lock.
void spinlock_release(Spinlock* lock);

51
kernel/start.c Normal file
View File

@ -0,0 +1,51 @@
#include "kernel/lib.h"
#include "kernel/riscv.h"
#include "kernel/kalloc.h"
#include "kernel/vm.h"
#include "kernel/cpu.h"
// Per-hart state; only slot 0 is used until SMP bring-up (see kinit @Todo).
Cpu cpus[MAX_CPU];
// Linker-script symbols: BSS bounds and the end of physical RAM.
extern uint32_t _bss_start;
extern uint32_t _bss_end;
extern uint32_t _ram_end;
// S-mode trap entry (installed in stvec): every trap is fatal for now.
// NOTE(review): a plain C function saves no trap frame and cannot sret —
// fine while it only panics, but must be replaced for real trap handling.
void kstrap()
{
    panic("kstrap");
}
// First S-mode code, reached from kinit via sret: bring up the allocator
// and paging, then stop.
void kstart()
{
    kalloc_init();
    kvm_init();
    panic("kstart: end");
}
// First C code after boot.s: runs in M-mode, delegates traps, opens RAM
// through the PMP, and drops into S-mode kstart via sret. Never returns.
void kinit()
{
    // Clear BSS FIRST: every uninitialized global — including `cpus`
    // below — lives in BSS, so any store made before this loop would be
    // silently wiped. (The original wrote cpus[0] before clearing.)
    for (uint32_t* ptr = &_bss_start; ptr < &_bss_end; ++ptr)
    {
        *ptr = 0U;
    }
    // @Todo Initialize CPUs
    Cpu null_cpu = {0};
    cpus[0] = null_cpu;
    // Hand all interrupts and exceptions down to S-mode.
    w_mideleg(0xFFFF);
    w_medeleg(0xFFFF);
    // sret will copy SPIE into SIE and enter the privilege level in SPP.
    w_sstatus(SSTATUS_SPIE | SSTATUS_SPP_S);
    w_sie(SIE_SEIE);
    w_sepc(&kstart);  // where sret lands
    w_stvec(&kstrap); // S-mode trap handler
    // PMP region 0: TOR range [0, _ram_end) with RWX, otherwise S-mode
    // accesses fault once we leave M-mode.
    w_pmpcfg0(PMPCFG_RW | PMPCFG_X | PMPCFG_TOR);
    w_pmpaddr0(((uint32_t)&_ram_end) >> PMPADDR_SHIFT);
    __asm__ volatile("sret");
}

108
kernel/vm.c Normal file
View File

@ -0,0 +1,108 @@
#include "kernel/vm.h"
#include "kernel/kalloc.h"
#include "kernel/lib.h"
#include "kernel/riscv.h"
// Sv32 PTE flag bits. The permission bits (R/W/X) live in vm.h as VM_R,
// VM_RW, VM_X so callers of kvm_map can pass them.
enum
{
    VM_VALID = 1,
    VM_USER = 16,
    VM_GLOBAL = 32,
    VM_ACCESSED = 64,
    VM_DIRTY = 128,
};
// One Sv32 page table: 1024 four-byte entries, exactly one page.
struct PageTable
{
    uint32_t entries[1024];
};
typedef struct PageTable PageTable;
static_assert(sizeof(PageTable) == PAGE_SIZE);
// Kernel root page table, allocated in kvm_init.
static PageTable* kroot = nullptr;
// True when the address is a multiple of the page size.
static inline bool is_aligned(uint32_t p)
{
    return p % PAGE_SIZE == 0;
}
// Advance a pointer by exactly one page.
static inline void* offset_page(void* p)
{
    return (char*)p + PAGE_SIZE;
}
// Install a single 4 KiB mapping va -> pa into `root`, allocating the
// second-level table on demand. `mode` carries the VM_R/VM_RW/VM_X bits.
void kvm_map_one(PageTable* root, void* va, void* pa, uint32_t mode)
{
    // Sv32 splits a VA into two 10-bit VPN fields (plus a 12-bit offset).
    uint32_t vpn[] = {
        ((uint32_t)va >> 12) & 0x3ff,
        ((uint32_t)va >> 22) & 0x3ff,
    };
    uint32_t fl = root->entries[vpn[1]];
    if (!(fl & VM_VALID))
    {
        // No second-level table yet: allocate a zeroed one and point the
        // root entry at it. A non-leaf PTE holds pa >> 2 (PPN in bits 31:10).
        PageTable* child = (PageTable*)kzalloc();
        fl = (((uint32_t)child >> 2) & 0xffff'fc00) | VM_VALID;
        root->entries[vpn[1]] = fl;
    }
    // Recover the child table's address by undoing the >> 2 encoding.
    PageTable* child = (PageTable*)((fl & 0xffff'fc00) << 2);
    uint32_t ppn[] = {
        ((uint32_t)pa >> 12) & 0x3ff,
        ((uint32_t)pa >> 22) & 0x3ff,
    };
    // NOTE(review): leaf PTEs are written without VM_ACCESSED/VM_DIRTY; on
    // cores without hardware A/D updating, the first access to such a page
    // faults — confirm the trap handler or the target hardware covers this.
    child->entries[vpn[0]] = (ppn[1] << 20) | (ppn[0] << 10) | VM_VALID | mode;
}
// Map [va, va+size) onto [pa, pa+size) in the kernel root table, one page
// at a time. All three quantities must be page-aligned.
void kvm_map(void* va, void* pa, uint32_t size, uint32_t mode)
{
    if (!is_aligned((uint32_t)va)) panic("kvm_map: virtual address not aligned");
    if (!is_aligned((uint32_t)pa)) panic("kvm_map: physical address not aligned");
    if (!is_aligned(size)) panic("kvm_map: size not aligned");
    for (uint32_t mapped = 0; mapped < size; mapped += PAGE_SIZE)
    {
        kvm_map_one(kroot, va, pa, mode);
        va = offset_page(va);
        pa = offset_page(pa);
    }
}
extern char _text_start;
extern char _text_end;
extern char _rodata_start;
extern char _rodata_end;
extern char _data_start;
extern char _data_end;
extern char _bss_start;
extern char _bss_end;
extern char _stack_start;
extern char _stack_end;
extern char _heap_start;
extern char _heap_end;
// Build the kernel address space: identity-map every kernel section plus
// the MMIO regions the kernel touches, then switch on Sv32 translation.
void kvm_init()
{
    kroot = (PageTable*)kzalloc();
    // Permissions follow each section's role: code X, rodata R, data RW.
    kvm_map(&_text_start, &_text_start, &_text_end - &_text_start, VM_R | VM_X);
    kvm_map(&_rodata_start, &_rodata_start, &_rodata_end - &_rodata_start, VM_R);
    kvm_map(&_data_start, &_data_start, &_data_end - &_data_start, VM_RW);
    kvm_map(&_bss_start, &_bss_start, &_bss_end - &_bss_start, VM_RW);
    kvm_map(&_heap_start, &_heap_start, &_heap_end - &_heap_start, VM_RW);
    kvm_map(&_stack_start, &_stack_start, &_stack_end - &_stack_start, VM_RW);
    kvm_map((void*)0x1000'0000, (void*)0x1000'0000, PAGE_SIZE, VM_RW); // UART
    kvm_map((void*)0x0c00'0000, (void*)0x0c00'0000, 0x8000, VM_RW); // APLIC-M
    kvm_map((void*)0x0d00'0000, (void*)0x0d00'0000, 0x8000, VM_RW); // APLIC-S
    // satp = mode bit | root table's physical page number.
    w_satp(SATP_MODE_SV32 | ((uint32_t)kroot >> 12));
    sfence_vma();
}

13
kernel/vm.h Normal file
View File

@ -0,0 +1,13 @@
#pragma once
#include "kernel/lib.h"
// Leaf-PTE permission bits in Sv32 layout (R=bit 1, W=bit 2, X=bit 3).
enum
{
    VM_R = 2,  // readable
    VM_RW = 6, // readable + writable
    VM_X = 8,  // executable
};
// Build the kernel address space and enable paging (vm.c).
void kvm_init();
// Map a page-aligned range into the kernel table; mode = VM_* bits above.
void kvm_map(void* va, void* pa, uint32_t size, uint32_t mode);