#pragma once

#include "kernel/lib.h"

enum MemoryOrdering
{
    MEM_RELAXED = 0,
    MEM_ACQUIRE = 1,
    MEM_RELEASE = 2,
    MEM_ACQ_REL = 3,
};
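
/*
 * Rough correspondence to C11 <stdatomic.h> (an informal note, not a
 * claim of exact equivalence):
 *
 *   MEM_RELAXED  ~ memory_order_relaxed
 *   MEM_ACQUIRE  ~ memory_order_acquire
 *   MEM_RELEASE  ~ memory_order_release
 *   MEM_ACQ_REL  ~ memory_order_acq_rel
 *
 * On RISC-V these map onto the .aq/.rl annotation bits of LR and AMO
 * instructions, or onto explicit FENCE instructions.
 */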

/* Atomically load *v with the requested ordering. Load-reserved is used
 * so the ordering can be encoded in the .aq annotation; the dangling
 * reservation is harmless. The "memory" clobber stops the compiler from
 * caching *v or moving other accesses across the load. */
static inline uint32_t atomic_load(volatile uint32_t* v, enum MemoryOrdering o)
{
    uint32_t v2;
    switch (o)
    {
        case MEM_RELAXED: __asm__ volatile("lr.w      %0, (%1)" : "=r"(v2) : "r"(v) : "memory"); break;
        case MEM_ACQUIRE: __asm__ volatile("lr.w.aq   %0, (%1)" : "=r"(v2) : "r"(v) : "memory"); break;
        /* Release ordering is meaningless for a load, and the ISA spec
         * advises against setting .rl on LR without .aq, so degrade to
         * a plain load-reserved. */
        case MEM_RELEASE: __asm__ volatile("lr.w      %0, (%1)" : "=r"(v2) : "r"(v) : "memory"); break;
        case MEM_ACQ_REL: __asm__ volatile("lr.w.aqrl %0, (%1)" : "=r"(v2) : "r"(v) : "memory"); break;
    }
    return v2;
}
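
/*
 * Illustrative use (a sketch; `ready` and `payload` are hypothetical
 * caller-side variables, not part of this header): the consumer half of
 * a message-passing handoff. The acquire load of the flag is ordered
 * before the subsequent read of the payload.
 *
 *     while (atomic_load(&ready, MEM_ACQUIRE) == 0)
 *         ;                  // spin until the producer publishes
 *     use(payload);          // guaranteed to see the producer's write
 */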

/* Atomically add a to *v and return the previous value. AMOs encode the
 * ordering directly in their .aq/.rl bits. The AMO writes memory behind
 * the compiler's back, so every variant needs the "memory" clobber. */
static inline uint32_t atomic_fetch_add(volatile uint32_t* v, uint32_t a, enum MemoryOrdering o)
{
    uint32_t v2;
    switch (o)
    {
        case MEM_RELAXED: __asm__ volatile("amoadd.w      %0, %1, (%2)" : "=r"(v2) : "r"(a), "r"(v) : "memory"); break;
        case MEM_ACQUIRE: __asm__ volatile("amoadd.w.aq   %0, %1, (%2)" : "=r"(v2) : "r"(a), "r"(v) : "memory"); break;
        case MEM_RELEASE: __asm__ volatile("amoadd.w.rl   %0, %1, (%2)" : "=r"(v2) : "r"(a), "r"(v) : "memory"); break;
        case MEM_ACQ_REL: __asm__ volatile("amoadd.w.aqrl %0, %1, (%2)" : "=r"(v2) : "r"(a), "r"(v) : "memory"); break;
    }
    return v2;
}
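
/*
 * Illustrative use (a sketch; `struct ticket_lock` and `lock` are
 * hypothetical, not part of this header): the acquire path of a ticket
 * lock. The ticket grab itself can be relaxed; the acquire load that
 * observes our turn provides the ordering for the critical section.
 *
 *     struct ticket_lock { volatile uint32_t next; volatile uint32_t serving; };
 *
 *     uint32_t me = atomic_fetch_add(&lock->next, 1, MEM_RELAXED);
 *     while (atomic_load(&lock->serving, MEM_ACQUIRE) != me)
 *         ;                  // spin until our ticket is served
 *     // ... critical section ...
 */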

/* Issue a memory fence with the requested ordering. The "memory"
 * clobber is required so the compiler does not reorder accesses across
 * the fence; the hardware instruction alone is not enough. */
static inline void atomic_fence(enum MemoryOrdering o)
{
    switch (o)
    {
        case MEM_RELAXED: break; /* a relaxed fence is a no-op */
        case MEM_ACQUIRE: __asm__ volatile("fence r,rw" ::: "memory"); break;
        case MEM_RELEASE: __asm__ volatile("fence rw,w" ::: "memory"); break;
        /* fence.tso orders everything except store->load, the standard
         * mapping for an acquire-release fence. */
        case MEM_ACQ_REL: __asm__ volatile("fence.tso"  ::: "memory"); break;
    }
}
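
/*
 * Illustrative use (a sketch; `payload` and `ready` are hypothetical,
 * and the flag store is assumed to be a naturally aligned 32-bit store,
 * which RISC-V performs as a single access): the producer half of the
 * message-passing handoff above. The release fence keeps the payload
 * write from sinking below the flag store.
 *
 *     payload = compute();       // plain write
 *     atomic_fence(MEM_RELEASE); // fence rw,w: payload visible first
 *     ready = 1;                 // volatile 32-bit flag store
 *     // consumers pair this with atomic_load(&ready, MEM_ACQUIRE)
 */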