push
This commit is contained in:
@@ -0,0 +1,49 @@
|
||||
#ifndef CAPABILITIES_H
#define CAPABILITIES_H

#include <stdint.h>
#include <stdbool.h>

/* Well-known POSIX-style ids: superuser and the unprivileged "nobody"
 * account (65534 is the conventional nobody uid/gid). */
#define UID_ROOT 0
#define GID_ROOT 0
#define UID_NOBODY 65534
#define GID_NOBODY 65534

/* Capability bits: one privilege per bit of a 64-bit mask (task_t.capabilities).
 * Names suggest the guarded operation; exact enforcement points live in the
 * syscall/driver code — confirm there before relying on a description. */

/* Hardware access */
#define CAP_IOPORT (1ULL << 0)      /* raw I/O port access — presumably */
#define CAP_RAWMEM (1ULL << 1)      /* raw physical memory access — presumably */
#define CAP_DMA (1ULL << 2)
#define CAP_IRQ (1ULL << 3)

/* Task management */
#define CAP_KILL_ANY (1ULL << 4)    /* kill tasks not owned by the caller — presumably */
#define CAP_SET_PRIO (1ULL << 5)
#define CAP_TASK_SPAWN (1ULL << 6)
#define CAP_TASK_INFO (1ULL << 7)

/* Memory mapping */
#define CAP_MMAP_EXEC (1ULL << 8)
#define CAP_MMAP_PHYS (1ULL << 9)

/* Filesystem */
#define CAP_FS_ROOT (1ULL << 10)
#define CAP_FS_OWNER (1ULL << 11)

/* Networking */
#define CAP_NET_RAW (1ULL << 12)
#define CAP_NET_BIND (1ULL << 13)

/* System administration */
#define CAP_SYSADMIN (1ULL << 14)
#define CAP_REBOOT (1ULL << 15)
#define CAP_MODULE (1ULL << 16)
#define CAP_SETUID (1ULL << 17)
#define CAP_AUDIT (1ULL << 18)
#define CAP_PTRACE (1ULL << 19)
#define CAP_DBG_SERIAL (1ULL << 20)

/* All bits set — granted to root by cap_initial(). */
#define CAP_ALL (~0ULL)
/* Minimum set every task receives (see cap_initial()). */
#define CAP_BASIC_SET (CAP_MMAP_EXEC | CAP_TASK_SPAWN)
/* Basic set plus what a long-running service typically needs. */
#define CAP_SERVICE_SET (CAP_BASIC_SET | CAP_TASK_INFO | CAP_SET_PRIO | CAP_NET_BIND)
/*
 * cap_has — test whether a capability set contains an entire capability mask.
 * Returns true only when every bit of `cap` is also set in `caps`, so it
 * works for multi-bit masks like CAP_SERVICE_SET, not just single bits.
 */
static inline bool cap_has(uint64_t caps, uint64_t cap) {
    /* Equivalent to (caps & cap) == cap: no requested bit may be missing. */
    uint64_t missing = cap & ~caps;
    return missing == 0;
}
|
||||
|
||||
/*
 * cap_drop — return `caps` with every bit of `cap` cleared.
 * Pure function; the caller stores the result back if it wants the drop
 * to take effect.
 */
static inline uint64_t cap_drop(uint64_t caps, uint64_t cap) {
    uint64_t keep_mask = ~cap;
    return caps & keep_mask;
}
|
||||
|
||||
static inline uint64_t cap_initial(uint32_t uid) {
|
||||
return (uid == UID_ROOT) ? CAP_ALL : CAP_BASIC_SET;
|
||||
}
|
||||
|
||||
#endif
|
||||
@@ -0,0 +1,183 @@
|
||||
#ifndef TASK_H
#define TASK_H

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>
#include "capabilities.h"
#include "../memory/vmm.h"
#include "../smp/smp.h"

/* Fallback numeric values for the GCC/Clang memory-order macros, matching
 * the values those compilers predefine (RELAXED=0, ACQUIRE=2, RELEASE=3).
 * NOTE(review): __ATOMIC_* are reserved identifiers, and a compiler that
 * lacks them almost certainly also lacks the __atomic_* builtins used
 * below — this fallback likely only helps code analyzers; confirm intent. */
#ifndef __ATOMIC_RELAXED
#define __ATOMIC_RELAXED 0
#define __ATOMIC_ACQUIRE 2
#define __ATOMIC_RELEASE 3
#endif
|
||||
|
||||
/*
 * Minimal atomic boolean built directly on the GCC/Clang __atomic builtins.
 * NOTE(review): the name `atomic_bool` collides with C11 <stdatomic.h>;
 * do not include both in one translation unit.
 */
typedef struct { volatile bool _val; } atomic_bool;

/* Initialize with no ordering guarantees (relaxed store). */
static inline void atomic_init_bool(atomic_bool *flag, bool value) {
    __atomic_store_n(&flag->_val, value, __ATOMIC_RELAXED);
}

/* Acquire load: reads after this cannot be reordered before it. */
static inline bool atomic_load_bool_acq(const atomic_bool *flag) {
    return __atomic_load_n(&flag->_val, __ATOMIC_ACQUIRE);
}

/* Release store: writes before this are visible once the store is seen. */
static inline void atomic_store_bool_rel(atomic_bool *flag, bool value) {
    __atomic_store_n(&flag->_val, value, __ATOMIC_RELEASE);
}

/*
 * Strong compare-and-swap.
 * On success: flag goes *expected -> desired (acquire ordering), returns true.
 * On failure: *expected is overwritten with the observed value (relaxed),
 * returns false.
 */
static inline bool atomic_cas_bool(atomic_bool *flag, bool *expected, bool desired) {
    return __atomic_compare_exchange_n(&flag->_val,
                                       expected,
                                       desired,
                                       false /* strong CAS */,
                                       __ATOMIC_ACQUIRE,
                                       __ATOMIC_RELAXED);
}
|
||||
|
||||
/* Opaque per-task file-descriptor table; definition lives elsewhere. */
typedef struct fd_table fd_table_t;

/* FPU/SIMD register save area: 512 bytes, 16-byte aligned — the size and
 * alignment FXSAVE/FXRSTOR require (presumably what fpu_save/fpu_restore
 * use; confirm against their assembly). */
typedef struct {
    uint8_t data[512] __attribute__((aligned(16)));
} fpu_state_t;
|
||||
|
||||
/* Task lifecycle states. */
typedef enum {
    TASK_RUNNING = 0,  /* currently executing on some CPU */
    TASK_READY,        /* runnable, queued in ready_queues */
    TASK_BLOCKED,      /* waiting on an event (sleep/wait) — presumably */
    TASK_ZOMBIE,       /* exited, not yet reaped by parent — presumably */
    TASK_DEAD,         /* fully torn down */
} task_state_t;
|
||||
|
||||
/* Privilege level of a task: kernel thread vs. userspace process. */
typedef enum {
    TASK_TYPE_KERNEL = 0,
    TASK_TYPE_USER = 1,
} task_type_t;

#define MAX_PRIORITY 31            /* highest level; ready_queues[] has MAX_PRIORITY+1 entries */
#define DEFAULT_PRIORITY 16        /* middle of the 0..31 range */
#define TASK_DEFAULT_TIMESLICE 10  /* initial time_slice value — units presumably scheduler ticks; confirm */
|
||||
|
||||
/* Per-task control block (TCB).
 * WARNING: the context-switch assembly reads several fields at hard-coded
 * byte offsets — see the _Static_asserts after this struct. The _padN
 * members pin that layout; do not reorder, insert, or resize fields
 * without updating the assembly offsets. */
typedef struct task {
    uint64_t rsp;               /* saved kernel stack pointer — must stay at offset 0 */
    uint64_t rip;               /* saved instruction pointer */
    uint64_t rbp_save;          /* saved frame pointer */
    uint64_t cr3;               /* page-table root loaded on switch-in */
    int priority;               /* 0..MAX_PRIORITY */
    bool runnable;
    uint8_t is_userspace;       /* nonzero for user tasks — presumably mirrors task_type_t; confirm */
    uint8_t _pad0[2];           /* explicit padding: keeps cpu_id 4-byte aligned */
    uint32_t cpu_id;
    char name[32];              /* human-readable name (from task_create) */

    uint32_t time_slice;        /* ticks remaining in current slice */
    uint32_t time_slice_init;   /* value the slice is refilled to */
    uint8_t _pad1[4];           /* align total_runtime to 8 bytes */
    uint64_t total_runtime;     /* accumulated run time — units not visible here; confirm */

    fpu_state_t* fpu_state;     /* FPU save area (pointer; allocation site not visible here) */
    bool fpu_used;              /* presumably set on first FPU use for lazy save; confirm */
    uint8_t _pad2[3];
    uint32_t last_cpu;          /* CPU the task last ran on */
    uint64_t cpu_affinity;      /* presumably a bitmask of allowed CPUs; confirm */

    void (*entry)(void*);       /* kernel-task entry point — offset 120 (asserted below) */
    void *arg;                  /* argument for entry — offset 128 (asserted below) */

    uintptr_t stack_base;       /* kernel stack allocation base — offset 136 (asserted) */
    uint64_t user_rsp;          /* user-mode stack pointer — offset 144 (asserted) */

    task_state_t state;
    uint8_t _pad3[4];           /* align next to 8 bytes */

    struct task* next;          /* intrusive link for scheduler/ready queues */

    uint32_t pid;
    uint32_t ppid;              /* parent pid */
    uint32_t uid;
    uint32_t gid;
    uint64_t capabilities;      /* CAP_* bitmask (capabilities.h) */

    struct task* parent;        /* process tree: parent / first child / next sibling */
    struct task* children;
    struct task* sibling;

    int exit_code;
    volatile bool pending_kill; /* set asynchronously by task_kill — presumably polled by the task; confirm */
    uint8_t _pad4[3];

    uintptr_t brk_start;        /* program-break (heap) region bounds */
    uintptr_t brk_current;
    uintptr_t brk_max;
    vmm_pagemap_t* pagemap;     /* address-space description; ownership gated by TASK_FLAG_OWN_PAGEMAP — presumably */

    uint32_t flags;             /* TASK_FLAG_* bits */
    uint32_t wait_for_pid;      /* pid this task is waiting on — presumably valid while TASK_BLOCKED; confirm */

    uint64_t wakeup_time_ns;    /* absolute wake deadline for sched_wakeup_sleepers() */

    /* Saved user-mode register snapshot; user_saved_rip/rbp offsets (272/280)
     * are asserted below because assembly reads them. */
    uint64_t user_saved_rip;
    uint64_t user_saved_rbp;
    uint64_t user_saved_rbx;
    uint64_t user_saved_r12;
    uint64_t user_saved_r13;
    uint64_t user_saved_r14;
    uint64_t user_saved_r15;
    uint64_t user_saved_r11;

    fd_table_t *fd_table;       /* per-task open-file table */

    atomic_bool on_cpu;         /* presumably true while executing on a CPU; confirm setter/clearer */

} task_t;
|
||||
|
||||
/* Bits for task_t.flags. */
#define TASK_FLAG_TRACE (1 << 0)           /* presumably enables ptrace-style tracing; confirm */
#define TASK_FLAG_VFORK (1 << 1)
#define TASK_FLAG_FORK (1 << 2)
#define TASK_FLAG_STARTED (1 << 3)         /* task has begun execution — presumably */
#define TASK_FLAG_OWN_PAGEMAP (1 << 4)     /* task owns its pagemap (must be freed with it) — presumably */
#define TASK_FLAG_STACK_DEFERRED (1 << 5)
#define TASK_FLAG_DESTROYED (1U << 31)     /* 1U: shifting signed 1 into bit 31 would be UB */
|
||||
|
||||
/* Layout guards: the context-switch/trampoline assembly addresses these
 * fields by fixed byte offset. If a field moves, the build breaks here and
 * the message names the assembly constant that must be updated in step. */
_Static_assert(offsetof(task_t, rsp) == 0, "task_t: rsp");
_Static_assert(offsetof(task_t, entry) == 120, "task_t: entry — update TASK_ENTRY_OFFSET");
_Static_assert(offsetof(task_t, arg) == 128, "task_t: arg — update TASK_ARG_OFFSET");
_Static_assert(offsetof(task_t, stack_base) == 136, "task_t: stack_base");
_Static_assert(offsetof(task_t, user_rsp) == 144, "task_t: user_rsp — update TASK_USER_RSP_OFFSET");
_Static_assert(offsetof(task_t, user_saved_rip) == 272, "task_t: user_saved_rip");
_Static_assert(offsetof(task_t, user_saved_rbp) == 280, "task_t: user_saved_rbp — update TASK_USER_SAVED_RBP_OFFSET");
||||
|
||||
/* One ready-list head per priority level (index = priority). */
extern task_t* ready_queues[MAX_PRIORITY + 1];
/* Task currently running on each CPU, indexed by CPU id. */
extern task_t* current_task[MAX_CPUS];

void sched_init(void);
void sched_reschedule(void);     /* pick the next task and switch to it — presumably */
void sched_print_stats(void);
void task_yield(void);           /* voluntarily give up the CPU */

/* Create a kernel task running entry(arg) at the given priority.
 * Ownership/failure semantics (NULL on error?) not visible here — confirm. */
task_t* task_create(const char* name, void (*entry)(void*), void* arg, int priority);

/* Create a user task with explicit entry RIP, user stack pointer, CR3,
 * pagemap, and credentials. */
task_t* task_create_user(const char* name, uintptr_t entry, uintptr_t user_rsp, uint64_t cr3, int priority, vmm_pagemap_t* pagemap, uint32_t uid, uint32_t gid);

__attribute__((noreturn)) void task_exit(void);  /* terminate the calling task; never returns */
void task_kill(task_t* task);
void task_destroy(task_t* task);
task_t* task_fork(task_t* parent);
task_t* task_find_by_pid(uint32_t pid);
uint32_t task_alloc_pid(void);
void task_reparent(task_t* child, task_t* new_parent);

/* NOTE(review): mid-file include — needed only for spinlock_t below;
 * consider hoisting to the top with the other includes. */
#include "spinlock.h"
extern spinlock_t children_lock;   /* presumably guards parent/children/sibling links; confirm */
void task_wakeup_waiters(uint32_t pid);
void task_unblock(task_t* t);
void sched_wakeup_sleepers(uint64_t now_ns);  /* wake tasks whose wakeup_time_ns has passed — presumably */
task_t* task_find_foreground(void);
extern volatile uint32_t g_foreground_pid;
void task_set_foreground(uint32_t pid);

/* Implemented in assembly (context-switch / trampoline code — presumably). */
extern void context_switch(task_t* old, task_t* next, task_t** current_task_slot, uint64_t new_cr3);
extern void first_task_start(task_t* task);
extern void task_trampoline(void);
extern void task_trampoline_user(void);
extern void task_trampoline_fork(void);
extern void fpu_save(fpu_state_t* state);
extern void fpu_restore(fpu_state_t* state);
#endif
|
||||
@@ -0,0 +1,44 @@
|
||||
#ifndef SPINLOCK_H
|
||||
#define SPINLOCK_H
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
/*
 * Ticket spinlock: FIFO-fair. A waiter takes the next ticket, then spins
 * until `serving` reaches it; release admits the next ticket.
 */
typedef struct {
    volatile uint32_t ticket;   /* next ticket number to hand out */
    volatile uint32_t serving;  /* ticket currently permitted to hold the lock */
} spinlock_t;

#define SPINLOCK_INIT { .ticket = 0, .serving = 0 }

/* Block until the lock is held by the caller. */
static inline void spinlock_acquire(spinlock_t* lock) {
    uint32_t me = __atomic_fetch_add(&lock->ticket, 1, __ATOMIC_RELAXED);
    for (;;) {
        if (__atomic_load_n(&lock->serving, __ATOMIC_ACQUIRE) == me)
            return;
        __asm__ volatile("pause");   /* x86 spin-wait hint */
    }
}

/* Unlock: admit the next ticket holder. */
static inline void spinlock_release(spinlock_t* lock) {
    __atomic_fetch_add(&lock->serving, 1, __ATOMIC_RELEASE);
}

/*
 * Non-blocking acquire. Returns 1 on success, 0 if the lock is held or
 * contended. Succeeds only when the observed ticket equals the observed
 * serving value AND the ticket can be bumped atomically from that snapshot
 * (the CAS fails if anyone queued up in between).
 */
static inline int spinlock_try_acquire(spinlock_t* lock) {
    uint32_t now_serving = __atomic_load_n(&lock->serving, __ATOMIC_RELAXED);
    uint32_t next_ticket = __atomic_load_n(&lock->ticket, __ATOMIC_RELAXED);
    if (now_serving != next_ticket)
        return 0;   /* held, or waiters already queued */
    uint32_t snapshot = now_serving;
    return __atomic_compare_exchange_n(&lock->ticket, &snapshot,
                                       now_serving + 1,
                                       0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}
|
||||
|
||||
/* Disable local interrupts, then take the lock. Returns the caller's saved
 * RFLAGS so the interrupt state can be restored on release. Required when a
 * lock can be taken from interrupt context (avoids self-deadlock). */
static inline uint64_t spinlock_acquire_irqsave(spinlock_t* lock) {
    uint64_t flags;
    /* Save RFLAGS to `flags`, then clear IF (cli). */
    asm volatile("pushfq; pop %0; cli" : "=r"(flags) :: "memory");
    spinlock_acquire(lock);
    return flags;
}

/* Release the lock, then restore the RFLAGS saved by the matching
 * spinlock_acquire_irqsave (re-enabling interrupts iff they were on). */
static inline void spinlock_release_irqrestore(spinlock_t* lock, uint64_t flags) {
    spinlock_release(lock);
    /* NOTE(review): push writes below RSP; this is only safe if the kernel
     * is built with -mno-red-zone — confirm the build flags. */
    asm volatile("push %0; popfq" :: "r"(flags) : "memory", "cc");
}

#endif
|
||||
Reference in New Issue
Block a user