/* NEW CODE implementing a more extensible physical memory manager
 * Copyright (C) 2024 Zak Fenton
 * NO WARRANTY USE AT YOUR OWN RISK etc. under terms of UNLICENSE or MIT license
 */

#include "types.h"
#include "param.h"
#include "memlayout.h"
#include "sched.h"
#include "riscv.h"
#include "defs.h"
#include "kprintf.h"

typedef struct page page_t;
struct page {
  uint64 vals[512];
};

typedef struct freepage freepage_t;
struct freepage {
  uint64 nfreehere; // This is checked against the main free count
  freepage_t* nextfree;
};

typedef struct range range_t;
struct range {
  int mode;
  int pad;
  uint64 start;
  uint64 end;
};

#define NRANGE 64

range_t range[NRANGE];
sched_spinlock_t physpg_lock;
freepage_t* firstfreepage;
uint64 nfreepages;
uint64 ntotalpages;
// This is not a CPU-mappable page map, just metadata
page_t* metadatamap;

void physpg_initbegin() {
  nfreepages = 0;
  ntotalpages = 0;
  for (int i = 0; i < NRANGE; i++) {
    range[i].mode = -1;
    range[i].start = 0;
    range[i].end = 0;
  }
  initlock(&physpg_lock, "physpg");
}

uint64 physpg_freeram() {
  return nfreepages * PGSIZE;
}

uint64 physpg_totalram() {
  return ntotalpages * PGSIZE;
}

uint64* physpg_metavar(uint64 addr);

// Allocate one physical page, tagging it with the given mode. Returns 0 if no
// free pages remain. Unless the caller is allocating metadata pages, the
// page's metadata variable is cross-checked (it must be marked PHYSPG_FREERAM)
// and then updated to the new mode.
void* physpg_alloc1(int mode) {
  freepage_t* p;
  acquire(&physpg_lock);
  if (firstfreepage) {
    p = firstfreepage;
    if (mode != PHYSPG_METADATA) {
      uint64* v = physpg_metavar((uint64) p);
      int vt = (int) ((*v)>>32);
      if (vt != PHYSPG_FREERAM) {
        panic("physpg_alloc1: memory corruption, free-listed page is not marked free in metadata");
      }
      *v = (((uint64)mode)<<32) | 1ULL;
    }
    if (p->nfreehere != nfreepages) {
      printf("badptr=%p nfreehere=%d nfreepages=%d\n", p, (int) (p->nfreehere), (int) nfreepages);
      panic("physpg_alloc1: memory corruption, mismatch of free counts");
    }
    firstfreepage = p->nextfree;
    nfreepages--;
  } else {
    p = 0;
  }
  release(&physpg_lock);
  return p;
}

// Return one physical page to the free list. Once the metadata map exists,
// the mode must match the mode recorded for the page when it was allocated.
void physpg_free1(int mode, void* physpg) {
  //printf("freeing %p\n", physpg);
  freepage_t* p = physpg;
  acquire(&physpg_lock);
  if (metadatamap && mode != PHYSPG_METADATA) {
    uint64* v = physpg_metavar((uint64) p);
    int vt = (int) ((*v)>>32);
    if (vt != mode) {
      panic("physpg_free1: memory corruption, mismatch of metadata modes");
    }
    *v = (((uint64)PHYSPG_FREERAM)<<32);
  }
  p->nextfree = firstfreepage;
  if (firstfreepage && firstfreepage->nfreehere != nfreepages) {
    panic("physpg_free1: memory corruption, mismatch of free counts");
  }
  p->nfreehere = ++nfreepages;
  firstfreepage = p;
  release(&physpg_lock);
}

// Register a page-aligned range of physical memory with the given mode. Pages
// in PHYSPG_FREERAM ranges are pushed onto the free list immediately; the
// per-page metadata for every range is filled in later by physpg_initend().
void physpg_setrange(int mode, void* start, void* end) {
  start = (void*) PGROUNDUP((uint64) start);
  end = (void*) PGROUNDDOWN((uint64) end);
  if (end <= start) {
    panic("physpg_setrange: invalid range");
  }
  // Set idx to the index of a range struct with mode=-1 (checking the bound
  // before indexing into the array)
  int idx;
  for (idx = 0; idx < NRANGE && range[idx].mode >= 0; idx++)
    ;
  if (idx >= NRANGE) {
    panic("physpg_setrange: too many ranges");
  }
  range[idx].mode = mode;
  range[idx].start = (uint64) start;
  range[idx].end = (uint64) end;
  char* x;
  for (x = (char*)start; x < (char*) end; x += PGSIZE) {
    if (mode == PHYSPG_FREERAM) {
      physpg_free1(0, x);
    }
    ntotalpages++;
  }
}

// Walk the three-level, 512-entry-per-level map (indexed by the 9-bit address
// fields at bits 30, 21 and 12, like an Sv39 page table) and return a pointer
// to the uint64 variable for the 4096-byte page containing addr. If createmode
// is greater than zero, missing intermediate tables are allocated with that
// mode; otherwise the lookup returns 0 when a level is missing.
uint64* physpg_maplookupvar(page_t* map, uint64 addr, int createmode) {
  if (!map) {
    return (void*) 0ULL;
  }
  int idx2 = (int) ((addr >> 30) & 0x1FF);
  int idx1 = (int) ((addr >> 21) & 0x1FF);
  int idx0 = (int) ((addr >> 12) & 0x1FF);
  //printf("looking up addr %p in %p\n", (void*)addr, map);
  if (!map->vals[idx2]) {
    if (createmode > 0) {
      printf("Creating level 2 table #%d\n", idx2);
      map->vals[idx2] = (uint64) physpg_alloc1(createmode);
      memset((void*) (map->vals[idx2]), 0, PGSIZE);
    } else {
      return (void*) 0ULL;
    }
  }
  page_t* intermediatepage = (void*) (map->vals[idx2]);
  if (!intermediatepage->vals[idx1]) {
    if (createmode > 0) {
      printf("Creating level 1 table #%d\n", idx1);
      intermediatepage->vals[idx1] = (uint64) physpg_alloc1(createmode);
      memset((void*) (intermediatepage->vals[idx1]), 0, PGSIZE);
    } else {
      return (void*) 0ULL;
    }
  }
  page_t* lookuppage = (void*) (intermediatepage->vals[idx1]);
  return lookuppage->vals + idx0;
}

// Look up the metadata variable for a physical address, creating any missing
// levels of the metadata map. Panics if no variable can be found.
uint64* physpg_metavar(uint64 addr) {
  uint64* varaddr = physpg_maplookupvar(metadatamap, addr, PHYSPG_METADATA);
  if (!varaddr) {
    panic("physpg_metavar: no variable for address");
  }
  return varaddr;
}

uint64 physpg_mapget(page_t* map, uint64 addr) {
  uint64* var = physpg_maplookupvar(map, addr, -1);
  if (var) {
    return *var;
  } else {
    return 0;
  }
}

void physpg_mapset(page_t* map, uint64 addr, int createmode, uint64 val) {
  uint64* var = physpg_maplookupvar(map, addr, createmode);
  if (!var) {
    panic("physpg_mapset got NULL variable");
  }
  *var = val;
}

// To finalise initialisation of the page manager, first go through the set of
// ranges building a page map of metadata variables, then go through the pages
// used by the metadata map, marking each page used to construct the map itself
// as metadata.
// After this, allocation of (non-metadata) pages can be streamlined to always
// track the allocations and any reference counts etc. in the map. This also
// makes it easy to check that a kalloc'd/kfree'd page is actually in the
// expected state to begin with.
void physpg_initend() {
  uint64* v;
  metadatamap = physpg_alloc1(PHYSPG_METADATA);
  if (!metadatamap) {
    panic("physpg_initend: can't allocate metadata, initialisation failure");
  }
  memset(metadatamap, 0, PGSIZE);
  for (int i = 0; i < NRANGE; i++) {
    if (range[i].mode >= 0) {
      for (uint64 pg = range[i].start; pg < range[i].end; pg += PGSIZE) {
        v = physpg_metavar(pg);
        *v = ((uint64)(range[i].mode))<<32;
      }
    }
  }
  v = physpg_metavar((uint64)metadatamap);
  *v = ((uint64)PHYSPG_METADATA)<<32;
  for (int outer = 0; outer < 512; outer++) {
    page_t* intermediatepg = (void*) (metadatamap->vals[outer]);
    if (intermediatepg) {
      v = physpg_metavar((uint64) intermediatepg);
      *v = ((uint64)PHYSPG_METADATA)<<32;
      for (int inner = 0; inner < 512; inner++) {
        page_t* innerpg = (void*) (intermediatepg->vals[inner]);
        if (innerpg) {
          v = physpg_metavar((uint64) innerpg);
          *v = ((uint64)PHYSPG_METADATA)<<32;
        }
      }
    }
  }
}
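
/* Usage sketch (kept out of the build): one plausible boot sequence for this
 * allocator. It assumes an xv6-style layout where the linker symbol `end`
 * marks the first address after the kernel image and PHYSTOP (expected from
 * memlayout.h) marks the top of physical RAM; those symbols, and the wrapper
 * function itself, are assumptions rather than part of this file. */
#if 0
extern char end[];   // assumed linker symbol: first address after the kernel

void example_physpg_boot(void) {
  physpg_initbegin();
  // Hand everything between the kernel image and the top of RAM to the
  // allocator as ordinary free pages (physpg_setrange rounds the bounds to
  // page boundaries itself).
  physpg_setrange(PHYSPG_FREERAM, (void*) end, (void*) PHYSTOP);
  // Build the metadata map. From here on each page has a metadata variable
  // with its mode in the upper 32 bits (the lower bits are set to 1 on
  // allocation and cleared on free), and physpg_alloc1/physpg_free1
  // cross-check it on every call.
  physpg_initend();

  // kalloc()/kfree()-style wrappers would then pair physpg_alloc1(mode) with
  // physpg_free1(mode, pg) using whatever PHYSPG_* mode constants the rest of
  // the kernel defines; only PHYSPG_FREERAM and PHYSPG_METADATA appear here.
}
#endif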