// slkern/proc.c — process management (based on xv6 proc.c)

// TODO: CHECK/REPLACE/UPDATE OLD CODE (this file is based on xv6)
#include "types.h"
#include "param.h"
#include "memlayout.h"
#include "riscv.h"
#include "sched.h"
#include "proc.h"
#include "defs.h"
#include "drives.h"
#include "bitarray.h"
#include "fpu.h"
#include "sched.h"
#include "kprintf.h"
// Scheduler bitmaps (defined in sched.c): per-priority runnable and
// exhausted sets, plus one global bitmap of sleeping processes.
// Bit index for a process is its slot number, (p - proc).
extern struct bitarray *runnablearrays[NPRIO];
extern struct bitarray *exhaustedarrays[NPRIO];
extern struct bitarray *sleeping;
// Global process table; a process's slot index is (p - proc).
struct proc proc[NPROC];
struct proc *initproc;
// Next pid to hand out; protected by pid_lock (see allocpid()).
int nextpid = 1;
sched_spinlock_t pid_lock;
extern void forkret(void);
static void freeproc(struct proc *p);
extern char trampoline[]; // trampoline.S
// helps ensure that wakeups of wait()ing
// parents are not lost. helps obey the
// memory model when using p->parent.
// must be acquired before any p->lock.
sched_spinlock_t wait_lock;
void printptr(void* x);
// Allocate one physical page per process for its kernel stack and map
// each high in kernel memory. KSTACK spaces the mappings so every stack
// is followed by an unmapped guard page that faults on overflow.
void
proc_mapstacks(pagetable_t kpgtbl)
{
  for(int slot = 0; slot < NPROC; slot++){
    char *stackpage = kalloc();
    if(stackpage == 0)
      panic("kalloc");
    kvmmap(kpgtbl, KSTACK(slot), (uint64)stackpage, PGSIZE, PTE_R | PTE_W);
  }
}
// Initialize the process table and the scheduler's bitmaps/locks.
// Called once at boot, before any process exists.
void
procinit(void)
{
  // One runnable/exhausted bitmap pair per priority level,
  // plus a single bitmap tracking sleepers.
  for(int prio = 0; prio < NPRIO; prio++){
    runnablearrays[prio] = bitarrayalloc(NPROC);
    exhaustedarrays[prio] = bitarrayalloc(NPROC);
  }
  sleeping = bitarrayalloc(NPROC);

  initlock(&pid_lock, "nextpid");
  initlock(&wait_lock, "wait_lock");

  // Every slot starts UNUSED with its kernel stack VA precomputed
  // to match the mapping made in proc_mapstacks().
  for(int slot = 0; slot < NPROC; slot++){
    struct proc *entry = &proc[slot];
    initlock(&entry->lock, "proc");
    entry->state = SCHED_STATE_UNUSED;
    entry->kstack = KSTACK(slot);
  }
}
// Return the current struct proc *, or zero if none.
struct proc*
myproc(void)
{
push_off();
sched_core_t *c = SCHED_CORE_THIS_NOINTERRUPTS();
struct proc *p = c->process;
pop_off();
return p;
}
// Allocate a fresh, unique process id.
// Serialized with pid_lock so concurrent callers never receive the
// same pid. Returns the new pid.
// Fix: declare with (void) — an empty parameter list is an obsolete
// K&R non-prototype declaration in C11 and disables argument checking.
int
allocpid(void)
{
  int pid;
  acquire(&pid_lock);
  pid = nextpid;
  nextpid = nextpid + 1;
  release(&pid_lock);
  return pid;
}
// Look in the process table for an SCHED_STATE_UNUSED proc.
// If found, initialize state required to run in the kernel,
// and return with p->lock held.
// If there are no free procs, or a memory allocation fails, return 0.
// withpgtbl: when nonzero, also create an empty user page table via
// proc_pagetable(); pass 0 to leave p->pagetable untouched.
static struct proc*
allocproc(int withpgtbl)
{
struct proc *p;
// Linear scan; take each slot's lock before inspecting its state so
// the UNUSED->USED transition below is atomic with the check.
for(p = proc; p < &proc[NPROC]; p++) {
acquire(&p->lock);
if(p->state == SCHED_STATE_UNUSED) {
goto found;
} else {
release(&p->lock);
}
}
return 0;
found:
p->pid = allocpid();
p->state = SCHED_STATE_USED;
// Allocate a trapframe page.
if((p->trapframe = (sched_frame_t *)kalloc()) == 0){
freeproc(p);
release(&p->lock);
return 0;
}
// An empty user page table.
if (withpgtbl) {
p->pagetable = proc_pagetable(p);
if(p->pagetable == 0){
freeproc(p);
release(&p->lock);
return 0;
}
}
// Set up new context to start executing at forkret,
// which returns to user space.
memset(&p->context, 0, sizeof(sched_context_t));
p->context.ra = (uint64)(&forkret);
p->context.sp = p->kstack + PGSIZE; // kernel stack grows down from the top
// NOTE: returns with p->lock still held; the caller must release it.
return p;
}
// free a proc structure and the data hanging from it,
// including user pages.
// p->lock must be held.
static void
freeproc(struct proc *p)
{
// Release the trapframe page, if one was allocated.
if(p->trapframe)
kfree((void*)(p->trapframe));
p->trapframe = 0;
// For a thread (p->mainthread set), the user pages belong to the main
// thread, so pass sz = 0 to avoid freeing memory shared with it.
if(p->pagetable)
proc_freepagetable(p->pagetable, p->mainthread /*&& p->mainthread != p*/ ? 0 : p->sz, 0);
p->pagetable = 0;
// Scrub all per-process bookkeeping so the slot can be reused.
p->sz = 0;
p->pid = 0;
p->parent = 0;
p->mainthread = 0;
p->drives = 0;
p->cwdrive = 0;
p->fpu_active = 0;
p->fpu_saved = 0;
p->name[0] = 0;
p->chan = 0;
p->killed = 0;
p->xstate = 0;
p->state = SCHED_STATE_UNUSED;
p->timeslice = 0;
//sched_restate_alreadylocked(p, SCHED_STATE_UNUSED);
}
// Create a user page table for a given process, with no user memory,
// but with trampoline and trapframe pages.
// Returns 0 if any allocation or mapping fails.
pagetable_t
proc_pagetable(struct proc *p)
{
  // Start from an empty top-level page table.
  pagetable_t pagetable = uvmcreate();
  if(pagetable == 0)
    return 0;

  // Map the trampoline code (for system call return) at the highest
  // user virtual address. Only the supervisor uses it, on the way
  // to/from user space, so not PTE_U.
  if(mappages(pagetable, TRAMPOLINE, PGSIZE,
              (uint64)trampoline, PTE_R | PTE_X) < 0)
    goto fail_free;

  // Map the trapframe page just below the trampoline, for trampoline.S.
  if(mappages(pagetable, TRAPFRAME, PGSIZE,
              (uint64)(p->trapframe), PTE_R | PTE_W) < 0)
    goto fail_unmap;

  return pagetable;

fail_unmap:
  uvmunmap(pagetable, TRAMPOLINE, 1, 0);
fail_free:
  uvmfree(pagetable, 0, 0);
  return 0;
}
// Free a process's page table, and free the
// physical memory it refers to.
// sz: bytes of user memory to free (freeproc passes 0 for a thread
//     whose memory belongs to its main thread).
// reallyfree: forwarded to uvmfree; NOTE(review): presumably controls
//     whether leaf physical pages are released — confirm in vm code.
void
proc_freepagetable(pagetable_t pagetable, uint64 sz, int reallyfree)
{
uvmunmap(pagetable, TRAMPOLINE, 1, 0); // do_free=0: trampoline page is shared
uvmunmap(pagetable, TRAPFRAME, 1, 0); // do_free=0: trapframe page is freed in freeproc()
uvmfree(pagetable, sz, reallyfree);
}
// a user program that calls exec("/init")
// assembled from ../user/initcode.S
// od -t xC ../user/initcode
// Raw machine code + data for the very first user program; copied into
// the first user page of initproc by userinit() below.
uchar initcode[] = {
/* version with execve: */
0x17, 0x05, 0x00, 0x00, 0x03, 0x35, 0x05, 0x05, 0x97, 0x05, 0x00, 0x00, 0x83, 0xb5, 0x05, 0x05,
0x13, 0x86, 0x85, 0x00, 0x93, 0x08, 0x30, 0x02, 0x73, 0x00, 0x00, 0x00, 0x89, 0x48, 0x73, 0x00,
0x00, 0x00, 0xef, 0xf0, 0xbf, 0xff, 0x2f, 0x69, 0x6e, 0x69, 0x74, 0x00, 0x00, 0x01, 0x00, 0x13,
0x26, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x26, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
// Nonzero until forkret() has performed one-time filesystem
// initialization in the context of the first process.
int first;
// Set up first user process.
// Fix: allocproc() can return 0 (no free slot / out of memory); the
// original dereferenced the result unchecked. At boot that is fatal,
// so panic with a clear message instead of faulting on a null pointer.
void
userinit(void)
{
  struct proc *p;

  first = 1;
  p = allocproc(1);
  if(p == 0)
    panic("userinit: allocproc failed");
  initproc = p;
  // allocate one user page and copy initcode's instructions
  // and data into it.
  uvmfirst(p->pagetable, initcode, 0x160 /*sizeof(initcode)*/);
  p->sz = PGSIZE;
  // prepare for the very first "return" from kernel to user.
  p->trapframe->epc = 0;     // user program counter
  p->trapframe->sp = PGSIZE; // user stack pointer
  safestrcpy(p->name, "initcode", PROC_NAME_SIZE /*sizeof(p->name)*/);
  // Build the boot environment: drives table, disk block cache, and
  // the root filesystem instance mounted as drive "BOOT".
  printf("drives_alloc()...\n");
  p->drives = drives_alloc();
  printf("drives_alloc() returned %p\n", p->drives);
  printf("diskio_cache_alloc()...\n");
  diskio_cache_t* cache = diskio_cache_alloc(NBUF, DISKIO_BLOCK_SIZE);
  printf("diskio_cache_alloc() returned %p\n", cache);
  printf("fsinstance_alloc()...\n");
  fsinstance_t* instance = fsinstance_alloc();
  printf("fsinstance_alloc() returned %p\n", instance);
  instance->fslog_device = ROOTDEV; // Very important, this must be set before the call to fsinstance_lookup() or everything will break later
  instance->cache = cache;
  printf("drives_setup()...\n");
  int dn = drives_setup(p->drives, DRIVES_HANDLER_FS, instance, "BOOT");
  printf("drives_setup() returned %d\n", dn);
  p->cwdrive = drives_open(p->drives, "BOOT", 0ULL, 0ULL);
  p->cwd = fsinstance_lookup(p->drives->entries[p->cwdrive].handlerdata, "/");
  // Scheduling parameters: init may run on any core.
  p->prio = INITPRIO;
  p->maxprio = 1;
  p->affinitymask = 0xFFFFFFFFFFFFFFFFULL;
  sched_restate_alreadylocked(p, SCHED_STATE_RUNNABLE);
  release(&p->lock); // allocproc() returned with p->lock held
  //bitarray_set(runnablearrays[INITPRIO], (int) (p - proc), 1);
}
// Prime a freshly allocated proc to run as a thread of `main`:
// record the owning main thread and point the saved user registers at
// the thread's entry point, its user stack, and its single argument.
void setupthread(struct proc* p, struct proc* main, uint64 func, uint64 stack, uint64 arg) {
p->mainthread = main; // thread-group leader owning the address space
p->trapframe->epc = func; // user pc: thread start function
p->trapframe->sp = stack; // caller-provided user stack pointer
p->trapframe->a0 = arg; // first-argument register for the entry function
}
// Detach a thread from its main thread's shared address space so a
// later freeproc() will not free pages owned by the main thread.
void cleanupthread(struct proc* p) {
p->mainthread = 0;
p->pagetable = 0; // NOTE(review): the shallow-shared page table is dropped, not freed — confirm the main thread retains ownership
}
// Grow or shrink user memory by n bytes.
// Return 0 on success, -1 on failure.
int
growproc(int n)
{
  struct proc *p = myproc();
  uint64 newsz = p->sz;

  if(n > 0){
    // Growing: allocate and map writable pages up to the new size.
    newsz = uvmalloc(p->pagetable, newsz, newsz + n, PTE_W);
    if(newsz == 0)
      return -1;
  } else if(n < 0){
    // Shrinking: unmap and free the pages beyond the new size.
    newsz = uvmdealloc(p->pagetable, newsz, newsz + n);
  }

  p->sz = newsz;
  return 0;
}
// Create a new process, copying the parent.
// Sets up child kernel stack to return as if from fork() system call.
// Returns the child's pid in the parent, 0 in the child (via the
// trapframe a0 below), or -1 on failure.
int
fork(void)
{
int i, pid;
struct proc *np;
struct proc *p = myproc();
// Allocate process.
if((np = allocproc(1)) == 0){
return -1;
}
// Copy user memory from parent to child.
if(uvmcopy(p->pagetable, np->pagetable, p->sz) < 0){
freeproc(np);
release(&(np->lock));
return -1;
}
np->sz = p->sz;
// copy saved user registers.
memmove(np->trapframe, p->trapframe, sizeof(sched_frame_t));
// Cause fork to return 0 in the child.
np->trapframe->a0 = 0;
// Child inherits the parent's scheduling parameters.
np->prio = p->prio;
np->maxprio = p->maxprio;
np->affinitymask = p->affinitymask;
// increment reference counts on open file descriptors.
for(i = 0; i < NOFILE; i++)
if(p->ofile[i])
np->ofile[i] = filedup(p->ofile[i]);
np->cwdrive = drives_dup(p->drives, p->cwdrive);
np->drives = p->drives; // refcount should be handled by copying drive number
np->cwd = fsinstance_inode_copyref(p->cwd);
safestrcpy(np->name, p->name, PROC_NAME_SIZE /*sizeof(p->name)*/);
pid = np->pid;
// Drop np->lock before taking wait_lock: wait_lock must be acquired
// before any p->lock (see wait_lock's declaration comment).
release(&(np->lock));
acquire(&wait_lock);
np->parent = p;
np->mainthread = 0ULL; // Never inherit threads, start in single-thread mode.
release(&wait_lock);
// Re-take np->lock to publish the child as runnable to the scheduler.
acquire(&(np->lock));
sched_restate_alreadylocked(np, SCHED_STATE_RUNNABLE);
release(&(np->lock));
return pid;
}
// NOTE: This is partly new code but the rest was copied from fork()
// Create a new thread sharing the current process's user memory
// (shallow page-table copy rather than fork()'s deep uvmcopy).
// fnc: user entry point; stk: user stack pointer; arg: a0 argument.
// Returns the new thread's pid, or -1 on failure.
int
thrd(uint64 fnc, uint64 stk, uint64 arg)
{
struct proc* np = allocproc(1);
struct proc* p = myproc();
if (np) {
// First thread creation promotes the caller to its own main thread.
struct proc* mainthread = p->mainthread;
if (mainthread == 0ULL) {
mainthread = p;
p->mainthread = p;
}
//np->pagetable = p->pagetable;
// Share the caller's user memory by shallow-copying the mappings.
if(uvmcopyshallow(p->pagetable, np->pagetable, p->sz) < 0){
freeproc(np);
release(&(np->lock));
return -1;
}
np->sz = p->sz; // TODO...
// copy saved user registers.
//memmove(np->trapframe, p->trapframe, sizeof(trapframe_t));
setupthread(np, mainthread, fnc, stk, arg);
// Thread inherits the caller's scheduling parameters.
np->prio = p->prio;
np->maxprio = p->maxprio;
np->affinitymask = p->affinitymask;
// increment reference counts on open file descriptors.
for(int i = 0; i < NOFILE; i++)
if(p->ofile[i])
np->ofile[i] = filedup(p->ofile[i]);
np->cwdrive = drives_dup(p->drives, p->cwdrive);
np->drives = p->drives; // refcount should be handled by copying drive number
np->cwd = fsinstance_inode_copyref(p->cwd);
safestrcpy(np->name, p->name, PROC_NAME_SIZE /*sizeof(p->name)*/);
int pid = np->pid;
// Same lock dance as fork(): wait_lock before any p->lock.
release(&(np->lock));
acquire(&wait_lock);
np->parent = p;
//np->mainthread = 0ULL;
release(&wait_lock);
acquire(&(np->lock));
sched_restate_alreadylocked(np, SCHED_STATE_RUNNABLE);
release(&(np->lock));
return pid;
}
return -1;
}
// Pass p's abandoned children to init.
// Caller must hold wait_lock.
void
reparent(struct proc *p)
{
  for(int i = 0; i < NPROC; i++){
    struct proc *child = &proc[i];
    if(child->parent == p){
      child->parent = initproc;
      // init may be sleeping in wait(); let it reap the orphan.
      sched_wake(initproc);
    }
  }
}
// Exit the current process. Does not return.
// An exited process remains in the zombie state
// until its parent calls wait().
void
exit(int status)
{
struct proc *p = myproc();
// Shutdown FPU
fpu_status_write(0);
if(p == initproc)
panic("init exiting");
// If this is the main thread of a multithreaded program, kill all threads before continuing
/*if (p->mainthread == p) {
for (int i = 0; i < NPROC; i++) {
struct proc* thr = &proc[i];
if (thr != p) {
acquire(&thr->lock);
int tpid = thr->pid;
int shouldkill = thr->mainthread == p;
release(&thr->lock);
if (shouldkill) kill(tpid);
}
}
} else if (p->mainthread) {
cleanupthread(p);
}*/
// Close all open files.
for(int fd = 0; fd < NOFILE; fd++){
if(p->ofile[fd]){
struct file *f = p->ofile[fd];
fileclose(f);
p->ofile[fd] = 0;
}
}
// Drop the current-working-directory inode and drive references.
fsinstance_t* instance = drives_fsbegin(p->drives, p->cwdrive, "");
fsinstance_inode_unget(p->cwd);
drives_fsend(p->drives, instance);
p->cwdrive = drives_close(p->drives, p->cwdrive);
p->cwd = 0;
p->drives = 0;
acquire(&wait_lock);
// Give any children to init.
reparent(p);
// Parent might be sleeping in wait().
sched_wake(p->parent);
// Take p->lock before dropping wait_lock so the parent's wait()
// cannot free this slot until we have switched away in sched().
acquire(&p->lock);
p->xstate = status;
p->state = SCHED_STATE_ZOMBIE;
// unnecessary as proc is SCHED_STATE_RUNNING: sched_restate_alreadylocked(p, SCHED_STATE_ZOMBIE);
release(&wait_lock);
// Jump into the scheduler, never to return.
// (p->lock is still held, as sched() requires.)
sched();
panic("zombie exit");
}
// Wait for a child process to exit and return its pid.
// Return -1 if this process has no children.
// addr: user address to copy the child's int exit status to,
// or 0 to discard the status.
int
wait(uint64 addr)
{
struct proc *pp;
int havekids, pid;
struct proc *p = myproc();
acquire(&wait_lock);
for(;;){
// Scan through table looking for exited children.
havekids = 0;
for(pp = proc; pp < &proc[NPROC]; pp++){
if(pp->parent == p){
// make sure the child isn't still in exit() or swtch() [now sched_switchcontext()]
acquire(&pp->lock);
havekids = 1;
if(pp->state == SCHED_STATE_ZOMBIE){
// Found one.
pid = pp->pid;
// Report exit status to the caller's buffer if requested;
// a failed copyout aborts the whole wait with -1.
if(addr != 0 && copyout(p->pagetable, addr, (char *)&pp->xstate,
sizeof(int /*pp->xstate*/)) < 0) {
release(&pp->lock);
release(&wait_lock);
return -1;
}
freeproc(pp);
release(&pp->lock);
release(&wait_lock);
return pid;
}
release(&pp->lock);
}
}
// No point waiting if we don't have any children.
if(!havekids || killed(p)){
release(&wait_lock);
return -1;
}
// Wait for a child to exit.
// sleep() releases wait_lock while asleep and reacquires it,
// so a child's exit() wakeup cannot be missed.
sleep(p, &wait_lock); //DOC: wait-sleep
}
}
// Switch to scheduler. Must hold only p->lock
// and have changed proc->state. Saves and restores
// intena because intena is a property of this
// kernel thread, not this CPU. It should
// be proc->intena and proc->noff, but that would
// break in the few places where a lock is held but
// there's no process.
void
sched(void)
{
int intena;
struct proc *p = myproc();
// Sanity checks: exactly one lock (p->lock) held, state already
// changed away from RUNNING, and interrupts disabled.
if(!holding(&p->lock))
panic("sched p->lock");
if(SCHED_CORE_THIS_NOINTERRUPTS()->interruptsoff_depth != 1)
panic("sched locks");
if(p->state == SCHED_STATE_RUNNING)
panic("sched running");
if(intr_get())
panic("sched interruptible");
// Save this kernel thread's interrupt-enable flag across the switch;
// other processes will run on this core before we return here.
intena = SCHED_CORE_THIS_NOINTERRUPTS()->interruptsoff_wereinterruptson;
sched_switchcontext(&p->context, &SCHED_CORE_THIS_NOINTERRUPTS()->registers);
//swtch(&p->context, &mycpu()->context);
SCHED_CORE_THIS_NOINTERRUPTS()->interruptsoff_wereinterruptson = intena;
}
// Give up the CPU for one scheduling round.
void
yield(void)
{
struct proc *p = myproc();
// Re-advertise ourselves in the runnable bitmap for our priority.
// NOTE(review): set before p->lock is taken — presumably safe because
// only the running process touches its own bit here; confirm.
bitarray_set(runnablearrays[p->prio], (int) (p - proc), 1);
acquire(&p->lock);
sched_restate_alreadylocked(p, SCHED_STATE_RUNNABLE);
sched(); // returns when the scheduler next picks us
release(&p->lock);
}
// A fork child's very first scheduling by scheduler()
// will swtch to forkret.
void
forkret(void)
{
//static int first = 1;
// Still holding p->lock from scheduler.
release(&myproc()->lock);
// One-time setup, gated by the global `first` set in userinit().
if (first) {
// File system initialization must be run in the context of a
// regular process (e.g., because it calls sleep), and thus cannot
// be run from main().
struct proc* p = myproc();
printf("fsinstance_init()...\n");
void * fsp = fsinstance_init(p->drives->entries[p->cwdrive].handlerdata, ROOTDEV);
printf("fsinstance_init() returned %p\n", fsp);
printf("diskio_mountallramdisks()...\n");
diskio_mountallramdisks(p->drives);
printf("diskio_mountallramdisks() returned.\n");
// TODO: instance->superblock = fsp;
first = 0;
// ensure other cores see first=0.
__sync_synchronize();
}
// Return to user space via the trampoline.
usertrapret();
}
// Atomically release lock and sleep on chan.
// Reacquires lock when awakened.
void
sleep(void *chan, sched_spinlock_t *lk)
{
struct proc *p = myproc();
// Must acquire p->lock in order to
// change p->state and then call sched.
// Once we hold p->lock, we can be
// guaranteed that we won't miss any wakeup
// (wakeup locks p->lock),
// so it's okay to release lk.
// Should be unnecessary as proc is SCHED_STATE_RUNNING:
// bitarray_set(runnables, (int) (p - proc), 0);
// Record ourselves in the scheduler's sleeping bitmap.
bitarray_set(sleeping, (int) (p - proc), 1);
acquire(&p->lock); //DOC: sleeplock1
release(lk);
// Go to sleep.
p->chan = chan;
p->state = SCHED_STATE_SLEEPING;
//sched_restate_alreadylocked(p, SCHED_STATE_SLEEPING);
sched();
// Tidy up.
p->chan = 0;
// Reacquire original lock.
release(&p->lock);
acquire(lk);
}
// Kill the process with the given pid.
// The victim won't exit until it tries to return
// to user space (see usertrap() in trap.c).
// Returns 0 on success, -1 if no process has that pid.
int
kill(int pid)
{
  for(int i = 0; i < NPROC; i++){
    struct proc *victim = &proc[i];
    acquire(&victim->lock);
    if(victim->pid != pid){
      release(&victim->lock);
      continue;
    }
    victim->killed = 1;
    if(victim->state == SCHED_STATE_SLEEPING){
      // Wake process from sleep() so it can notice killed and exit.
      sched_restate_alreadylocked(victim, SCHED_STATE_RUNNABLE);
    }
    release(&victim->lock);
    return 0;
  }
  return -1;
}
// Mark process p as killed, under p->lock.
// p will actually exit the next time it crosses the user/kernel
// boundary (see the comment on kill() above).
void
setkilled(struct proc *p)
{
acquire(&p->lock);
p->killed = 1;
release(&p->lock);
}
// Return whether p has been marked killed, reading the flag
// under p->lock.
int
killed(struct proc *p)
{
int k;
acquire(&p->lock);
k = p->killed;
release(&p->lock);
return k;
}
// Copy to either a user address, or kernel address,
// depending on usr_dst.
// Returns 0 on success, -1 on error.
int
either_copyout(int user_dst, uint64 dst, void *src, uint64 len)
{
  struct proc *p = myproc();
  // Kernel destination: plain memory copy, cannot fail.
  if(!user_dst){
    memmove((char *)dst, src, len);
    return 0;
  }
  // User destination: go through the process page table with checks.
  return copyout(p->pagetable, dst, src, len);
}
// Copy from either a user address, or kernel address,
// depending on usr_src.
// Returns 0 on success, -1 on error.
int
either_copyin(void *dst, int user_src, uint64 src, uint64 len)
{
  struct proc *p = myproc();
  // Kernel source: plain memory copy, cannot fail.
  if(!user_src){
    memmove(dst, (char*)src, len);
    return 0;
  }
  // User source: go through the process page table with checks.
  return copyin(p->pagetable, dst, src, len);
}