Commit

mostly running init proc (go)
tcfw committed Jun 3, 2024
1 parent c1548b5 commit 732fee5
Showing 55 changed files with 1,921 additions and 619 deletions.
175 changes: 74 additions & 101 deletions arch/aarch64/context.c
@@ -35,16 +35,16 @@ void save_to_context(context_t *ctx, uintptr_t trapFrame)
ctx->regs[i] = *reg++;

// // processor states
// uint64_t elr, sp, spsr = 0;
// __asm__ volatile("mrs %0, ELR_EL1"
// : "=r"(elr));
// __asm__ volatile("mrs %0, SP_EL0"
// : "=r"(sp));
// __asm__ volatile("mrs %0, SPSR_EL1"
// : "=r"(spsr));
// ctx->pc = elr;
// ctx->sp = sp;
// ctx->spsr = spsr;
uint64_t elr, sp, spsr = 0;
__asm__ volatile("mrs %0, ELR_EL1"
: "=r"(elr));
__asm__ volatile("mrs %0, SP_EL0"
: "=r"(sp));
__asm__ volatile("mrs %0, SPSR_EL1"
: "=r"(spsr));
ctx->pc = elr;
ctx->sp = sp;
ctx->spsr = spsr;

// fp registers
// __asm__ volatile("fmov %0, d0"
@@ -179,70 +179,70 @@ void save_to_context(context_t *ctx, uintptr_t trapFrame)

void set_to_context(context_t *ctx, uintptr_t trapFrame)
{
__asm__ volatile("fmov d0, %0" ::"r"(ctx->fpregs[0]));
__asm__ volatile("fmov v0.d[1], %0" ::"r"(ctx->fpregs[1]));
__asm__ volatile("fmov d1, %0" ::"r"(ctx->fpregs[2]));
__asm__ volatile("fmov v1.d[1], %0" ::"r"(ctx->fpregs[3]));
__asm__ volatile("fmov d2, %0" ::"r"(ctx->fpregs[4]));
__asm__ volatile("fmov v2.d[1], %0" ::"r"(ctx->fpregs[5]));
__asm__ volatile("fmov d3, %0" ::"r"(ctx->fpregs[6]));
__asm__ volatile("fmov v3.d[1], %0" ::"r"(ctx->fpregs[7]));
__asm__ volatile("fmov d4, %0" ::"r"(ctx->fpregs[8]));
__asm__ volatile("fmov v4.d[1], %0" ::"r"(ctx->fpregs[9]));
__asm__ volatile("fmov d5, %0" ::"r"(ctx->fpregs[10]));
__asm__ volatile("fmov v5.d[1], %0" ::"r"(ctx->fpregs[11]));
__asm__ volatile("fmov d6, %0" ::"r"(ctx->fpregs[12]));
__asm__ volatile("fmov v6.d[1], %0" ::"r"(ctx->fpregs[13]));
__asm__ volatile("fmov d7, %0" ::"r"(ctx->fpregs[14]));
__asm__ volatile("fmov v7.d[1], %0" ::"r"(ctx->fpregs[15]));
__asm__ volatile("fmov d8, %0" ::"r"(ctx->fpregs[16]));
__asm__ volatile("fmov v8.d[1], %0" ::"r"(ctx->fpregs[17]));
__asm__ volatile("fmov d9, %0" ::"r"(ctx->fpregs[18]));
__asm__ volatile("fmov v9.d[1], %0" ::"r"(ctx->fpregs[19]));
__asm__ volatile("fmov d10, %0" ::"r"(ctx->fpregs[20]));
__asm__ volatile("fmov v10.d[1], %0" ::"r"(ctx->fpregs[21]));
__asm__ volatile("fmov d11, %0" ::"r"(ctx->fpregs[22]));
__asm__ volatile("fmov v11.d[1], %0" ::"r"(ctx->fpregs[23]));
__asm__ volatile("fmov d12, %0" ::"r"(ctx->fpregs[24]));
__asm__ volatile("fmov v12.d[1], %0" ::"r"(ctx->fpregs[25]));
__asm__ volatile("fmov d13, %0" ::"r"(ctx->fpregs[26]));
__asm__ volatile("fmov v13.d[1], %0" ::"r"(ctx->fpregs[27]));
__asm__ volatile("fmov d14, %0" ::"r"(ctx->fpregs[28]));
__asm__ volatile("fmov v14.d[1], %0" ::"r"(ctx->fpregs[29]));
__asm__ volatile("fmov d15, %0" ::"r"(ctx->fpregs[30]));
__asm__ volatile("fmov v15.d[1], %0" ::"r"(ctx->fpregs[31]));
__asm__ volatile("fmov d16, %0" ::"r"(ctx->fpregs[32]));
__asm__ volatile("fmov v16.d[1], %0" ::"r"(ctx->fpregs[33]));
__asm__ volatile("fmov d17, %0" ::"r"(ctx->fpregs[34]));
__asm__ volatile("fmov v17.d[1], %0" ::"r"(ctx->fpregs[35]));
__asm__ volatile("fmov d18, %0" ::"r"(ctx->fpregs[36]));
__asm__ volatile("fmov v18.d[1], %0" ::"r"(ctx->fpregs[37]));
__asm__ volatile("fmov d19, %0" ::"r"(ctx->fpregs[38]));
__asm__ volatile("fmov v19.d[1], %0" ::"r"(ctx->fpregs[39]));
__asm__ volatile("fmov d20, %0" ::"r"(ctx->fpregs[40]));
__asm__ volatile("fmov v20.d[1], %0" ::"r"(ctx->fpregs[41]));
__asm__ volatile("fmov d21, %0" ::"r"(ctx->fpregs[42]));
__asm__ volatile("fmov v21.d[1], %0" ::"r"(ctx->fpregs[43]));
__asm__ volatile("fmov d22, %0" ::"r"(ctx->fpregs[44]));
__asm__ volatile("fmov v22.d[1], %0" ::"r"(ctx->fpregs[45]));
__asm__ volatile("fmov d23, %0" ::"r"(ctx->fpregs[46]));
__asm__ volatile("fmov v23.d[1], %0" ::"r"(ctx->fpregs[47]));
__asm__ volatile("fmov d24, %0" ::"r"(ctx->fpregs[48]));
__asm__ volatile("fmov v24.d[1], %0" ::"r"(ctx->fpregs[49]));
__asm__ volatile("fmov d25, %0" ::"r"(ctx->fpregs[50]));
__asm__ volatile("fmov v25.d[1], %0" ::"r"(ctx->fpregs[51]));
__asm__ volatile("fmov d26, %0" ::"r"(ctx->fpregs[52]));
__asm__ volatile("fmov v26.d[1], %0" ::"r"(ctx->fpregs[53]));
__asm__ volatile("fmov d27, %0" ::"r"(ctx->fpregs[54]));
__asm__ volatile("fmov v27.d[1], %0" ::"r"(ctx->fpregs[55]));
__asm__ volatile("fmov d28, %0" ::"r"(ctx->fpregs[56]));
__asm__ volatile("fmov v28.d[1], %0" ::"r"(ctx->fpregs[57]));
__asm__ volatile("fmov d29, %0" ::"r"(ctx->fpregs[58]));
__asm__ volatile("fmov v29.d[1], %0" ::"r"(ctx->fpregs[59]));
__asm__ volatile("fmov d30, %0" ::"r"(ctx->fpregs[60]));
__asm__ volatile("fmov v30.d[1], %0" ::"r"(ctx->fpregs[61]));
__asm__ volatile("fmov d31, %0" ::"r"(ctx->fpregs[62]));
__asm__ volatile("fmov v31.d[1], %0" ::"r"(ctx->fpregs[63]));
// __asm__ volatile("fmov d0, %0" ::"r"(ctx->fpregs[0]));
// __asm__ volatile("fmov v0.d[1], %0" ::"r"(ctx->fpregs[1]));
// __asm__ volatile("fmov d1, %0" ::"r"(ctx->fpregs[2]));
// __asm__ volatile("fmov v1.d[1], %0" ::"r"(ctx->fpregs[3]));
// __asm__ volatile("fmov d2, %0" ::"r"(ctx->fpregs[4]));
// __asm__ volatile("fmov v2.d[1], %0" ::"r"(ctx->fpregs[5]));
// __asm__ volatile("fmov d3, %0" ::"r"(ctx->fpregs[6]));
// __asm__ volatile("fmov v3.d[1], %0" ::"r"(ctx->fpregs[7]));
// __asm__ volatile("fmov d4, %0" ::"r"(ctx->fpregs[8]));
// __asm__ volatile("fmov v4.d[1], %0" ::"r"(ctx->fpregs[9]));
// __asm__ volatile("fmov d5, %0" ::"r"(ctx->fpregs[10]));
// __asm__ volatile("fmov v5.d[1], %0" ::"r"(ctx->fpregs[11]));
// __asm__ volatile("fmov d6, %0" ::"r"(ctx->fpregs[12]));
// __asm__ volatile("fmov v6.d[1], %0" ::"r"(ctx->fpregs[13]));
// __asm__ volatile("fmov d7, %0" ::"r"(ctx->fpregs[14]));
// __asm__ volatile("fmov v7.d[1], %0" ::"r"(ctx->fpregs[15]));
// __asm__ volatile("fmov d8, %0" ::"r"(ctx->fpregs[16]));
// __asm__ volatile("fmov v8.d[1], %0" ::"r"(ctx->fpregs[17]));
// __asm__ volatile("fmov d9, %0" ::"r"(ctx->fpregs[18]));
// __asm__ volatile("fmov v9.d[1], %0" ::"r"(ctx->fpregs[19]));
// __asm__ volatile("fmov d10, %0" ::"r"(ctx->fpregs[20]));
// __asm__ volatile("fmov v10.d[1], %0" ::"r"(ctx->fpregs[21]));
// __asm__ volatile("fmov d11, %0" ::"r"(ctx->fpregs[22]));
// __asm__ volatile("fmov v11.d[1], %0" ::"r"(ctx->fpregs[23]));
// __asm__ volatile("fmov d12, %0" ::"r"(ctx->fpregs[24]));
// __asm__ volatile("fmov v12.d[1], %0" ::"r"(ctx->fpregs[25]));
// __asm__ volatile("fmov d13, %0" ::"r"(ctx->fpregs[26]));
// __asm__ volatile("fmov v13.d[1], %0" ::"r"(ctx->fpregs[27]));
// __asm__ volatile("fmov d14, %0" ::"r"(ctx->fpregs[28]));
// __asm__ volatile("fmov v14.d[1], %0" ::"r"(ctx->fpregs[29]));
// __asm__ volatile("fmov d15, %0" ::"r"(ctx->fpregs[30]));
// __asm__ volatile("fmov v15.d[1], %0" ::"r"(ctx->fpregs[31]));
// __asm__ volatile("fmov d16, %0" ::"r"(ctx->fpregs[32]));
// __asm__ volatile("fmov v16.d[1], %0" ::"r"(ctx->fpregs[33]));
// __asm__ volatile("fmov d17, %0" ::"r"(ctx->fpregs[34]));
// __asm__ volatile("fmov v17.d[1], %0" ::"r"(ctx->fpregs[35]));
// __asm__ volatile("fmov d18, %0" ::"r"(ctx->fpregs[36]));
// __asm__ volatile("fmov v18.d[1], %0" ::"r"(ctx->fpregs[37]));
// __asm__ volatile("fmov d19, %0" ::"r"(ctx->fpregs[38]));
// __asm__ volatile("fmov v19.d[1], %0" ::"r"(ctx->fpregs[39]));
// __asm__ volatile("fmov d20, %0" ::"r"(ctx->fpregs[40]));
// __asm__ volatile("fmov v20.d[1], %0" ::"r"(ctx->fpregs[41]));
// __asm__ volatile("fmov d21, %0" ::"r"(ctx->fpregs[42]));
// __asm__ volatile("fmov v21.d[1], %0" ::"r"(ctx->fpregs[43]));
// __asm__ volatile("fmov d22, %0" ::"r"(ctx->fpregs[44]));
// __asm__ volatile("fmov v22.d[1], %0" ::"r"(ctx->fpregs[45]));
// __asm__ volatile("fmov d23, %0" ::"r"(ctx->fpregs[46]));
// __asm__ volatile("fmov v23.d[1], %0" ::"r"(ctx->fpregs[47]));
// __asm__ volatile("fmov d24, %0" ::"r"(ctx->fpregs[48]));
// __asm__ volatile("fmov v24.d[1], %0" ::"r"(ctx->fpregs[49]));
// __asm__ volatile("fmov d25, %0" ::"r"(ctx->fpregs[50]));
// __asm__ volatile("fmov v25.d[1], %0" ::"r"(ctx->fpregs[51]));
// __asm__ volatile("fmov d26, %0" ::"r"(ctx->fpregs[52]));
// __asm__ volatile("fmov v26.d[1], %0" ::"r"(ctx->fpregs[53]));
// __asm__ volatile("fmov d27, %0" ::"r"(ctx->fpregs[54]));
// __asm__ volatile("fmov v27.d[1], %0" ::"r"(ctx->fpregs[55]));
// __asm__ volatile("fmov d28, %0" ::"r"(ctx->fpregs[56]));
// __asm__ volatile("fmov v28.d[1], %0" ::"r"(ctx->fpregs[57]));
// __asm__ volatile("fmov d29, %0" ::"r"(ctx->fpregs[58]));
// __asm__ volatile("fmov v29.d[1], %0" ::"r"(ctx->fpregs[59]));
// __asm__ volatile("fmov d30, %0" ::"r"(ctx->fpregs[60]));
// __asm__ volatile("fmov v30.d[1], %0" ::"r"(ctx->fpregs[61]));
// __asm__ volatile("fmov d31, %0" ::"r"(ctx->fpregs[62]));
// __asm__ volatile("fmov v31.d[1], %0" ::"r"(ctx->fpregs[63]));

uint64_t *reg = (uint64_t *)trapFrame;

@@ -255,30 +255,3 @@ void set_to_context(context_t *ctx, uintptr_t trapFrame)
for (int i = 0; i <= 30; i++)
*reg++ = ctx->regs[i];
}

void __attribute__((noinline)) switch_to_context(context_t *ctx)
{
// setup pc, sp & pstate
__asm__ volatile("msr ELR_EL1, %0" ::"r"(ctx->pc));
__asm__ volatile("msr SP_EL0, %0" ::"r"(ctx->sp));
__asm__ volatile("msr SPSR_EL1, %0" ::"r"(ctx->spsr));

__asm__ volatile("ldp x2, x3, %0" ::"rm"(ctx->regs[2]));
__asm__ volatile("ldp x4, x5, %0" ::"rm"(ctx->regs[4]));
__asm__ volatile("ldp x6, x7, %0" ::"rm"(ctx->regs[6]));
__asm__ volatile("ldp x8, x9, %0" ::"rm"(ctx->regs[8]));
__asm__ volatile("ldp x10, x11, %0" ::"rm"(ctx->regs[10]));
__asm__ volatile("ldp x12, x13, %0" ::"rm"(ctx->regs[12]));
__asm__ volatile("ldp x14, x15, %0" ::"rm"(ctx->regs[14]));
__asm__ volatile("ldp x16, x17, %0" ::"rm"(ctx->regs[16]));
__asm__ volatile("ldp x18, x19, %0" ::"rm"(ctx->regs[18]));
__asm__ volatile("ldp x20, x21, %0" ::"rm"(ctx->regs[20]));
__asm__ volatile("ldp x22, x23, %0" ::"rm"(ctx->regs[22]));
__asm__ volatile("ldp x24, x25, %0" ::"rm"(ctx->regs[24]));
__asm__ volatile("ldp x26, x27, %0" ::"rm"(ctx->regs[26]));
__asm__ volatile("ldp x28, x29, %0" ::"rm"(ctx->regs[28]));
__asm__ volatile("ldr x30, %0" ::"rm"(ctx->regs[30]));
__asm__ volatile("ldp x0, x1, %0" ::"rm"(ctx->regs[0]));

__asm__ volatile("eret");
}
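
This file re-enables the ELR_EL1/SP_EL0/SPSR_EL1 capture in save_to_context, comments out the per-register fmov restore in set_to_context, and drops the C version of switch_to_context; the eret path reappears as assembly in arch/aarch64/entry.S below. One likely motive: a long run of separate __asm__ statements is fragile, since the compiler may emit its own register traffic between statements and clobber state that was just restored. A minimal sketch of a single-statement alternative for the SIMD/FP half, assuming fpregs[] holds q0..q31 as consecutive 128-bit values; restore_fpregs is a hypothetical helper, not part of this commit:

#include <stdint.h>

/* Hedged sketch, not the committed code: reload all 32 vector
 * registers in one asm statement so the compiler cannot schedule
 * its own code between the loads. */
static inline void restore_fpregs(const uint64_t *fpregs)
{
	__asm__ volatile(
		"ldp q0, q1, [%0, #0x000]\n\t"
		"ldp q2, q3, [%0, #0x020]\n\t"
		"ldp q4, q5, [%0, #0x040]\n\t"
		"ldp q6, q7, [%0, #0x060]\n\t"
		"ldp q8, q9, [%0, #0x080]\n\t"
		"ldp q10, q11, [%0, #0x0a0]\n\t"
		"ldp q12, q13, [%0, #0x0c0]\n\t"
		"ldp q14, q15, [%0, #0x0e0]\n\t"
		"ldp q16, q17, [%0, #0x100]\n\t"
		"ldp q18, q19, [%0, #0x120]\n\t"
		"ldp q20, q21, [%0, #0x140]\n\t"
		"ldp q22, q23, [%0, #0x160]\n\t"
		"ldp q24, q25, [%0, #0x180]\n\t"
		"ldp q26, q27, [%0, #0x1a0]\n\t"
		"ldp q28, q29, [%0, #0x1c0]\n\t"
		"ldp q30, q31, [%0, #0x1e0]\n\t"
		:
		: "r"(fpregs)
		: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
		  "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
		  "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
		  "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
		  "memory");
}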
53 changes: 21 additions & 32 deletions arch/aarch64/copy.S
@@ -4,55 +4,52 @@
// x0 - src
// x1 - dst
// x2 - length
// Temp registers:
// x3/w3 - read value
// x4 - size compare
// x5 - read bytes
_copy_from_user:
.globl _copy_from_user


mov x5, #0
loop1:
cmp x2, #0
b.eq ret2

cmp x2, #8
b.le copyByte

b.le copy1Byte
mov x4, #7
and x4, x4, x2
cmp x4, #0
b.eq copy8Byte

//TODO(tcfw) use alignment and length checks to do 8-byte xfers

copyByte:
copy1Byte:
ldtrb w3, [x0] // unprivileged

b ok1Byte
b ret3 //mark_failed_copy will bump to here

ok1Byte:
strb w3, [x1] // privileged
add x0, x0, #1
add x5, x5, #1
add x1, x1, #1
sub x2, x2, #1

b loop1

copy8Byte:
ldtr x3, [x0]

b ok8Byte
b ret3

ok8Byte:
ok8Byte:
str x3, [x1]
add x0, x0, #8
add x5, x5, #8
add x1, x1, #8
sub x2, x2, #8
b loop1

ret2:
mov x0, x5
ret

ret3:
mov x0, x2
ret3:
mov x0, x5
isb
ret

@@ -68,31 +65,23 @@ _copy_to_user:
loop4:
cmp x2, #0
b.eq ret5

//TODO(tcfw) use alignment and length checks to do 8-byte xfers

ldrb w3, [x0] // privileged

b ok2
b ret6 //mark_failed_copy will bump to here

ok2:
sttrb w3, [x1] // unprivileged
add x0, x0, #1
add x1, x1, #1
sub x2, x2, #1

b loop4

ret5:
ret

ret6:
mov x0, x2
isb
ret


mark_failed_copy:
.globl mark_failed_copy

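The visible fix in _copy_from_user is the return contract: the fault path (ret3) used to return the untouched remaining length from x2, while the new code keeps a running count of copied bytes in x5 and returns it from both the success exit (ret2) and the fault exit that mark_failed_copy redirects into. A hedged sketch of the C-side view, assuming the count comes back in x0 per the AAPCS64 calling convention; the wrapper name and error value are assumptions, not from this commit:

#include <stddef.h>

/* x0 = user source, x1 = kernel destination, x2 = length;
 * returns the number of bytes actually copied. */
extern size_t _copy_from_user(const void *usrc, void *kdst, size_t len);

static inline int copy_from_user_checked(const void *usrc, void *kdst, size_t len)
{
	size_t copied = _copy_from_user(usrc, kdst, len);
	return copied == len ? 0 : -1; /* a short copy means a fault mid-transfer */
}
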
37 changes: 36 additions & 1 deletion arch/aarch64/entry.S
@@ -264,6 +264,7 @@ k_exphandler_irq_entry:

.globl k_exphandler_irq
bl k_exphandler_irq

kexp_trapframe_pull

ERET
@@ -272,7 +273,6 @@
k_exphandler_fiq_entry:
kexp_trapframe_push


.globl k_exphandler_fiq
bl k_exphandler_fiq

@@ -290,3 +290,38 @@ syscall4:
.globl syscall4
SVC #0
RET

switch_to_context:
.globl switch_to_context

//ELR
ldr x1, [x0, #0x2f8]
msr ELR_EL1, x1

//SP
ldr x1, [x0, #0x300]
msr SP_EL0, x1

//SPSR
ldr x1, [x0, #0x308]
msr SPSR_EL1, x1

//Registers x2-x30, x0 & x1
LDP x2, x3, [x0, #0x10]
LDP x4, x5, [x0, #0x20]
LDP x6, x7, [x0, #0x30]
LDP x8, x9, [x0, #0x40]
LDP x10, x11, [x0, #0x50]
LDP x12, x13, [x0, #0x60]
LDP x14, x15, [x0, #0x70]
LDP x16, x17, [x0, #0x80]
LDP x18, x19, [x0, #0x90]
LDP x20, x21, [x0, #0xA0]
LDP x22, x23, [x0, #0xB0]
LDP x24, x25, [x0, #0xC0]
LDP x26, x27, [x0, #0xD0]
LDP x28, x29, [x0, #0xE0]
LDR x30, [x0, #0xF0]
LDP x0, x1, [x0]

eret
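
The assembly switch_to_context reaches into context_t with hard-coded offsets, so it stays correct only while the struct keeps this exact shape: regs[31] spans 0x000..0x0F8, fpregs[64] fills the gap up to 0x2F8, which lands pc, sp and spsr at 0x2F8, 0x300 and 0x308. Note that x0 carries the context pointer itself, which is why LDP x0, x1, [x0] must come last. A hedged reconstruction of the layout these offsets imply; the real definition lives elsewhere in the tree and should be checked against this:

#include <stddef.h>
#include <stdint.h>

/* Hedged reconstruction, not the committed header. */
typedef struct context {
	uint64_t regs[31];   /* x0..x30; the asm loads pairs from 0x000..0x0F0 */
	uint64_t fpregs[64]; /* q0..q31 stored as 64-bit halves */
	uint64_t pc;         /* ldr x1, [x0, #0x2f8] -> ELR_EL1 */
	uint64_t sp;         /* ldr x1, [x0, #0x300] -> SP_EL0 */
	uint64_t spsr;       /* ldr x1, [x0, #0x308] -> SPSR_EL1 */
} context_t;

_Static_assert(offsetof(context_t, pc) == 0x2F8, "pc offset drifted from entry.S");
_Static_assert(offsetof(context_t, sp) == 0x300, "sp offset drifted from entry.S");
_Static_assert(offsetof(context_t, spsr) == 0x308, "spsr offset drifted from entry.S");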
6 changes: 6 additions & 0 deletions arch/aarch64/include/elf_arch.h
@@ -0,0 +1,6 @@
#ifndef _KERNEL_ARCH_ELF_ARCH_H
#define _KERNEL_ARCH_ELF_ARCH_H

#define ARCH_ELF_MACHINE (0xB7)

#endif
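
0xB7 (183) is EM_AARCH64 in the ELF specification, so this new header gives the loader one arch-specific constant for rejecting binaries built for a different machine. A minimal sketch of the check, assuming an Elf64_Ehdr-style header; the function name is hypothetical, and a kernel tree would carry its own ELF definitions rather than the hosted elf.h used here:

#include <elf.h>      /* assumption: hosted header providing Elf64_Ehdr */
#include <elf_arch.h> /* ARCH_ELF_MACHINE from the file above */

static int elf_check_machine(const Elf64_Ehdr *ehdr)
{
	return ehdr->e_machine == ARCH_ELF_MACHINE ? 0 : -1;
}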
14 changes: 7 additions & 7 deletions arch/aarch64/include/kernel/paging.h
@@ -88,16 +88,9 @@
#define VM_ENTRY_VALID (1ULL << 0)
#define VM_ENTRY_ISTABLE (1ULL << 1)
#define VM_ENTRY_NONSECURE (1ULL << 5)
#define VM_ENTRY_USER (1ULL << 6)
#define VM_ENTRY_OA_MASK (0xFFFFFFFFF000ULL)
#define VM_ENTRY_UXN (1ULL << 54)
#define VM_ENTRY_PXN (1ULL << 53)

// OS params (55-58)
#define VM_ENTRY_LINKED (1ULL << 55) // symbolic link
#define VM_ENTRY_SWAPPEDOUT (1ULL << 56) // not in memory
#define VM_ENTRY_MAPPED (1ULL << 57) // mapped to device or similar region

#define VM_ENTRY_ATTR (2ULL)
#define VM_ENTRY_NG (1ULL << 11)
#define VM_ENTRY_AF (1ULL << 10)
@@ -106,6 +99,13 @@
#define VM_ENTRY_CONTIGUOUS (1ULL << 52)
#define VM_ENTRY_AP_SHIFT (6)
#define VM_ENTRY_PERM_RO (2ULL << VM_ENTRY_AP_SHIFT)
#define VM_ENTRY_PERM_W (1ULL << VM_ENTRY_AP_SHIFT)

// OS params
#define VM_ENTRY_LINKED (1ULL << 55) // symbolic link
#define VM_ENTRY_SWAPPEDOUT (1ULL << 56) // not in memory
#define VM_ENTRY_MAPPED (1ULL << 57) // mapped to device or shared region
#define VM_ENTRY_USER (1ULL << 58) // user space memory

#define VM_DESC_VALID (1ULL << 0)
#define VM_DESC_IS_DESC (1ULL << 1)
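The notable move in this header is VM_ENTRY_USER: bit 6 is AP[1] of the hardware access-permission field in a VMSAv8-64 descriptor, so the old definition overlapped what is now VM_ENTRY_PERM_W, and the user marker instead joins the other OS bookkeeping flags in the software-reserved bits 55..58. A hedged sketch of composing a leaf entry for a user-writable page from these flags; the PXN choice and the omission of memory-attribute and shareability bits are assumptions, not from this commit:

#include <stdint.h>
/* assumes the kernel/paging.h shown above is on the include path */

/* Hedged sketch: a level-3 page descriptor for a user RW mapping. */
static uint64_t make_user_rw_entry(uint64_t pa)
{
	return (pa & VM_ENTRY_OA_MASK)
	     | VM_ENTRY_VALID
	     | VM_ENTRY_ISTABLE /* bit 1 also marks a page descriptor at level 3 */
	     | VM_ENTRY_AF      /* pre-set the access flag to avoid a first-touch fault */
	     | VM_ENTRY_PERM_W  /* AP[1]: EL0 may access the page */
	     | VM_ENTRY_PXN     /* the kernel should never execute user memory */
	     | VM_ENTRY_USER;   /* software bit 58: OS-side user-mapping marker */
}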
