#pragma once
#include "kernel/lib.h"

/* Ordering constraints for the atomic helpers below; the values mirror the
 * aq/rl bits on RISC-V atomic instructions. */
enum MemoryOrdering
{
    MEM_RELAXED = 0, /* no ordering guarantees */
    MEM_ACQUIRE = 1, /* later accesses stay after this operation */
    MEM_RELEASE = 2, /* earlier accesses stay before this operation */
    MEM_ACQ_REL = 3, /* both acquire and release */
};
/* Atomically read a 32-bit word. lr.w is used so the requested ordering can
 * be encoded in the aq/rl bits; the "memory" clobber stops the compiler from
 * reordering surrounding accesses across the load. */
static inline uint32_t atomic_load(volatile uint32_t* v, enum MemoryOrdering o)
{
    uint32_t v2;
    switch (o)
    {
    case MEM_RELAXED: __asm__ volatile("lr.w %0, (%1)" : "=r"(v2) : "r"(v) : "memory"); break;
    case MEM_ACQUIRE: __asm__ volatile("lr.w.aq %0, (%1)" : "=r"(v2) : "r"(v) : "memory"); break;
    /* The spec discourages lr with rl set unless aq is also set, so the
     * release and acq-rel variants both use aqrl. */
    case MEM_RELEASE:
    case MEM_ACQ_REL: __asm__ volatile("lr.w.aqrl %0, (%1)" : "=r"(v2) : "r"(v) : "memory"); break;
    }
    return v2;
}
/* Atomically add a to *v; amoadd.w returns the value the word held before
 * the add. */
static inline uint32_t atomic_fetch_add(volatile uint32_t* v, uint32_t a, enum MemoryOrdering o)
{
    uint32_t v2;
    switch (o)
    {
    case MEM_RELAXED: __asm__ volatile("amoadd.w %0, %1, (%2)" : "=r"(v2) : "r"(a), "r"(v) : "memory"); break;
    case MEM_ACQUIRE: __asm__ volatile("amoadd.w.aq %0, %1, (%2)" : "=r"(v2) : "r"(a), "r"(v) : "memory"); break;
    case MEM_RELEASE: __asm__ volatile("amoadd.w.rl %0, %1, (%2)" : "=r"(v2) : "r"(a), "r"(v) : "memory"); break;
    case MEM_ACQ_REL: __asm__ volatile("amoadd.w.aqrl %0, %1, (%2)" : "=r"(v2) : "r"(a), "r"(v) : "memory"); break;
    }
    return v2;
}
/* Stand-alone fences. MEM_RELAXED intentionally emits no instruction;
 * fence.tso combines the acquire (r,rw) and release (rw,w) orderings. The
 * "memory" clobber makes each fence a compiler barrier as well, so the
 * compiler cannot undo the ordering the hardware fence provides. */
static inline void atomic_fence(enum MemoryOrdering o)
{
    switch (o)
    {
    case MEM_RELAXED: break;
    case MEM_ACQUIRE: __asm__ volatile("fence r,rw" ::: "memory"); break;
    case MEM_RELEASE: __asm__ volatile("fence rw,w" ::: "memory"); break;
    case MEM_ACQ_REL: __asm__ volatile("fence.tso" ::: "memory"); break;
    }
}
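
/*
 * Sketch of where atomic_fence is needed: the header has no atomic_store, so
 * publishing data through a flag pairs a plain store with a release fence,
 * matched by an acquire load on the reader. The globals and the
 * example_publish/example_consume names are hypothetical, for illustration
 * only.
 */
static volatile uint32_t example_data;
static volatile uint32_t example_ready;

static inline void example_publish(uint32_t value)
{
    example_data = value;
    atomic_fence(MEM_RELEASE); /* order the data write before the flag write */
    example_ready = 1;
}

static inline uint32_t example_consume(void)
{
    while (atomic_load(&example_ready, MEM_ACQUIRE) == 0)
        ; /* spin until the flag is observed */
    return example_data;
}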
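
/*
 * Sketch of a ticket spinlock built from the two atomic operations above.
 * struct SpinLock, spin_lock and spin_unlock are hypothetical names, not
 * part of the kernel's API.
 */
struct SpinLock
{
    volatile uint32_t next;  /* next ticket to hand out */
    volatile uint32_t owner; /* ticket currently holding the lock */
};

static inline void spin_lock(struct SpinLock* l)
{
    /* Taking a ticket needs no ordering of its own. */
    uint32_t ticket = atomic_fetch_add(&l->next, 1, MEM_RELAXED);
    /* Acquire keeps the critical section from floating above the load
     * that observes ownership. */
    while (atomic_load(&l->owner, MEM_ACQUIRE) != ticket)
        ;
}

static inline void spin_unlock(struct SpinLock* l)
{
    /* Release makes the critical section's writes visible before the
     * next ticket holder proceeds. */
    atomic_fetch_add(&l->owner, 1, MEM_RELEASE);
}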