// This is NEW CODE: a header to interact with the scheduler.
#ifndef _SCHED_H
#define _SCHED_H

// The size of one register on the processor.
typedef unsigned long long sched_reg_t;

// The context saved between scheduler states must match EXACTLY the one in
// sched_switchcontext (see schedasm.S). This only needs to save the kernel's
// C-like execution state.
typedef struct sched_context sched_context_t;
struct sched_context {
  // WARNING: REMEMBER TO CHANGE schedasm.S if this ordering is modified!
  sched_reg_t s0;
  sched_reg_t s1;
  sched_reg_t s2;
  sched_reg_t s3;
  sched_reg_t s4;
  sched_reg_t s5;
  sched_reg_t s6;
  sched_reg_t s7;
  sched_reg_t s8;
  sched_reg_t s9;
  sched_reg_t s10;
  sched_reg_t s11;
  sched_reg_t ra;
  sched_reg_t sp;
};

// Trap frame used by the trampoline code (currently being migrated to
// new code).
// Data for each process is stored in the page below the user's
// trampoline code, but will be at an arbitrary address in kernel mode
// (TODO: Better mapping for multithreading?).
// This is currently saved/restored by the old xv6 code, so it will be
// rearranged a bit as that is migrated (for now it should mostly match
// the old layout but with new types).
// See notes in trampoline.S.
typedef struct sched_frame sched_frame_t;
struct sched_frame {
  // Offset 0:
  sched_reg_t kmode_satp;   // Kernel-mode page table
  sched_reg_t kmode_sp;     // Kernel-mode stack pointer
  sched_reg_t kmode_trap;   // Address of usertrap() function or similar
  sched_reg_t epc;          // User-mode program counter
  // Offset 32:
  sched_reg_t kmode_hartid; // Kernel-mode thread pointer register
  sched_reg_t ra;
  sched_reg_t sp;
  sched_reg_t gp;
  // Offset 64:
  sched_reg_t tp;
  sched_reg_t t0;
  sched_reg_t t1;
  sched_reg_t t2;
  // Offset 96:
  sched_reg_t s0;
  sched_reg_t s1;
  sched_reg_t a0;
  sched_reg_t a1;
  // Offset 128:
  sched_reg_t a2;
  sched_reg_t a3;
  sched_reg_t a4;
  sched_reg_t a5;
  // Offset 160:
  sched_reg_t a6;
  sched_reg_t a7;
  sched_reg_t s2;
  sched_reg_t s3;
  // Offset 192:
  sched_reg_t s4;
  sched_reg_t s5;
  sched_reg_t s6;
  sched_reg_t s7;
  // Offset 224:
  sched_reg_t s8;
  sched_reg_t s9;
  sched_reg_t s10;
  sched_reg_t s11;
  // Offset 256:
  sched_reg_t t3;
  sched_reg_t t4;
  sched_reg_t t5;
  sched_reg_t t6;
};

// Switches context by storing registers into oldstruct and loading different
// values from newstruct, then returns oldstruct as a value to the new context.
sched_context_t* sched_switchcontext(sched_context_t* oldstruct,
                                     sched_context_t* newstruct);

// Each CPU core is referred to as a "core" in this system, since there may be
// a need to represent more complex topologies with multiple clusters of
// multiple cores etc., where each layer would otherwise be confusable with
// "CPUs". A CPU core is defined as physical or virtual hardware implementing
// one main thread of execution.
typedef struct sched_core sched_core_t;

// Spinlocks are still mostly implemented by old code but are defined here
// due to being tightly integrated into the scheduler.
typedef struct sched_spinlock sched_spinlock_t;
struct sched_spinlock {
  int lockvar;
  int padding;
  char* debugstring;  // Name for debugging
  sched_core_t* core; // The CPU core that's using this lock
};

typedef struct sched_sleeplock sched_sleeplock_t;
struct sched_sleeplock {
  unsigned int islocked;
  int padding;
  sched_spinlock_t spin; // The spinlock used internally
  int pid;               // The process id of the waiting process
  char* debugstring;     // Name for debugging (TODO: This is kind of redundant
                         // as the spinlock has a debugstring already.)
};
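// A minimal sketch (kept as a comment, not part of this header) of how the
// "Offset N" annotations in sched_frame could be checked at build time with
// C11 _Static_assert and offsetof from <stddef.h>. These particular checks
// are an illustrative assumption, not something schedasm.S or trampoline.S
// requires to exist here; they simply restate the offsets documented above,
// assuming sched_reg_t is 8 bytes wide.
/*
#include <stddef.h>
_Static_assert(offsetof(struct sched_frame, kmode_satp)   == 0,   "frame layout");
_Static_assert(offsetof(struct sched_frame, kmode_hartid) == 32,  "frame layout");
_Static_assert(offsetof(struct sched_frame, tp)           == 64,  "frame layout");
_Static_assert(offsetof(struct sched_frame, s0)           == 96,  "frame layout");
_Static_assert(offsetof(struct sched_frame, t3)           == 256, "frame layout");
*/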
struct sched_core {
  struct proc* process;
  sched_context_t registers;
  int preempted;
  int allowstarvation;
  int interruptsoff_depth;
  int interruptsoff_wereinterruptson;
};

#define SCHED_CORE_MAX 64
extern sched_core_t sched_cores[SCHED_CORE_MAX];

// These are implemented in schedasm.S.
sched_reg_t sched_cputhreadpointer_get();
void sched_cputhreadpointer_set(sched_reg_t value);

// Returns the core number. Must not be called with interrupts enabled, in
// case the process is rescheduled.
#define SCHED_CORE_THISNUMBER_NOINTERRUPTS() \
  ((int)sched_cputhreadpointer_get())

// Returns this core's sched_core_t*. Must not be called with interrupts
// enabled, in case the process is rescheduled.
#define SCHED_CORE_THIS_NOINTERRUPTS() \
  (&sched_cores[SCHED_CORE_THISNUMBER_NOINTERRUPTS()])

typedef int sched_state_t;
#define SCHED_STATE_UNUSED   ((sched_state_t)0)
#define SCHED_STATE_USED     ((sched_state_t)1)
#define SCHED_STATE_SLEEPING ((sched_state_t)2)
#define SCHED_STATE_RUNNABLE ((sched_state_t)3)
#define SCHED_STATE_RUNNING  ((sched_state_t)4)
#define SCHED_STATE_ZOMBIE   ((sched_state_t)5)

// Change p's scheduling state to s; the caller must already hold the
// relevant lock (hence "alreadylocked").
void sched_restate_alreadylocked(struct proc* p, sched_state_t s);

// Forward declarations; defined elsewhere.
typedef struct sched_task sched_task_t;
typedef struct sched_thread sched_thread_t;

// Scheduler timeslice bounds.
extern int timeslice_min;
extern int timeslice_max;

void sched_wake(void* pointer);
void sched_dumpstatus(); // Dump scheduler information to console.

// From ifndef at top of file:
#endif
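// Usage sketch (comment only, assumptions noted): how a caller might safely
// use the *_NOINTERRUPTS macros above. The example_* name and
// interrupts_push_off()/interrupts_pop_off() are hypothetical stand-ins for
// whatever the old xv6-derived code provides to disable and restore
// interrupts while tracking interruptsoff_depth.
/*
int example_current_core_number(void) {
  interrupts_push_off();                            // hypothetical: disable interrupts
  int n = SCHED_CORE_THISNUMBER_NOINTERRUPTS();     // safe while interrupts are off
  sched_core_t* core = SCHED_CORE_THIS_NOINTERRUPTS();
  (void)core;                                       // e.g. inspect core->process here
  interrupts_pop_off();                             // hypothetical: restore interrupts
  return n;
}
*/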