#include "param.h" #include "proc.h" #include "mmu.h" #include "defs.h" #include "memlayout.h" #interface "vm_impl.h" /* vm_impl* createvm_impl2(); //:skip */ __code loaduvm_ptesize_checkvm_impl(struct vm_impl* vm_impl,char* addr, __code next(int ret, ...)) { if ((uint) addr %PTE_SZ != 0) { // goto panic } goto loaduvm_loopvm_impl(vm_impl, next(ret, ...)); } __code loaduvm_loopvm_impl(struct vm_impl* vm_impl, uint i, uint sz,__code next(int ret, ...)) { if (i < sz) { goto loaduvm_check_pgdir(vm_impl, next(ret, ...)); } goto loaduvm_exit(vm_impl, next(ret, ...)); } static pte_t* walkpgdir (pde_t *pgdir, const void *va, int alloc) { pde_t *pde; pte_t *pgtab; // pgdir points to the page directory, get the page direcotry entry (pde) pde = &pgdir[PDE_IDX(va)]; if (*pde & PE_TYPES) { pgtab = (pte_t*) p2v(PT_ADDR(*pde)); } else { if (!alloc || (pgtab = (pte_t*) kpt_alloc()) == 0) { return 0; } // Make sure all those PTE_P bits are zero. memset(pgtab, 0, PT_SZ); // The permissions here are overly generous, but they can // be further restricted by the permissions in the page table // entries, if necessary. *pde = v2p(pgtab) | UPDE_TYPE; } return &pgtab[PTE_IDX(va)]; } __code loaduvm_check_pgdir(struct vm_impl* vm_impl, pte_t* pte, pde_t* pgdir, uint i, char* addr, uint pa, __code next(int ret, ...)) { if ((pte = walkpgdir(pgdir, addr + i, 0)) == 0) { // goto panic } pa = PTE_ADDR(*pte); Gearef(cbc_context, vm_impl)->pte = pte; Gearef(cbc_context, vm_impl)->pgdir = pgdir; Gearef(cbc_context, vm_impl)->addr = addr; Gearef(cbc_context, vm_impl)->pa = pa; goto loaduvm_check_PTE_SZ(vm_impl, next(ret, ...)); } __code loaduvm_check_PTE_SZ(struct vm_impl* vm_impl, uint sz, uint i, uint n, struct inode* ip, uint pa, uint offset, __code next(int ret, ...)) { if (sz - i < PTE_SZ) { n = sz - i; } else { n = PTE_SZ; } if (readi(ip, p2v(pa), offset + i, n) != n) { ret = -1; goto next(ret, ...); } Gearef(cbc_context, vm_impl)->n = n; goto loaduvm_loopvm_impl(vm_impl, next(ret, ...)); } __code loaduvm_exit(struct vm_impl* vm_impl, __code next(int ret, ...)) { ret = 0; goto next(ret, ...); } struct run { struct run *next; }; struct { struct spinlock lock; struct run* freelist; } kpt_mem; static int mappages (pde_t *pgdir, void *va, uint size, uint pa, int ap) { char *a, *last; pte_t *pte; a = (char*) align_dn(va, PTE_SZ); last = (char*) align_dn((uint)va + size - 1, PTE_SZ); for (;;) { if ((pte = walkpgdir(pgdir, a, 1)) == 0) { return -1; } if (*pte & PE_TYPES) { panic("remap"); } *pte = pa | ((ap & 0x3) << 4) | PE_CACHE | PE_BUF | PTE_TYPE; if (a == last) { break; } a += PTE_SZ; pa += PTE_SZ; } return 0; } __code kpt_alloc_check_impl(struct vm_impl* vm_impl, __code next(...)) { struct run* r; if ((r = kpt_mem.freelist) != NULL ) { kpt_mem.freelist = r->next; } release(&kpt_mem.lock); if ((r == NULL) && ((r = kmalloc (PT_ORDER)) == NULL)) { // panic("oom: kpt_alloc"); // goto panic } memset(r, 0, PT_SZ); goto next((char*)r); } __code allocuvm_check_newszvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint oldsz, uint newsz, __code next(int ret, ...)){ if (newsz >= UADDR_SZ) { goto next(0, ...); } if (newsz < oldsz) { ret = newsz; goto next(ret, ...); } char* mem; uint a = align_up(oldsz, PTE_SZ); goto allocuvm_loopvm_impl(vm_impl, pgdir, oldsz, newsz, mem, a, next(ret, ...)); } __code allocuvm_loopvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint oldsz, uint newsz, char* mem, uint a, __code next(int ret, ...)){ if (a < newsz) { mem = alloc_page(); if (mem == 0) { cprintf("allocuvm out of memory\n"); 
struct run {
    struct run *next;
};

struct {
    struct spinlock lock;
    struct run* freelist;
} kpt_mem;

static int mappages (pde_t *pgdir, void *va, uint size, uint pa, int ap)
{
    char *a, *last;
    pte_t *pte;

    a = (char*) align_dn(va, PTE_SZ);
    last = (char*) align_dn((uint)va + size - 1, PTE_SZ);

    for (;;) {
        if ((pte = walkpgdir(pgdir, a, 1)) == 0) {
            return -1;
        }

        if (*pte & PE_TYPES) {
            panic("remap");
        }

        *pte = pa | ((ap & 0x3) << 4) | PE_CACHE | PE_BUF | PTE_TYPE;

        if (a == last) {
            break;
        }

        a += PTE_SZ;
        pa += PTE_SZ;
    }

    return 0;
}

__code kpt_alloc_check_impl(struct vm_impl* vm_impl, __code next(...)) {
    struct run* r;

    if ((r = kpt_mem.freelist) != NULL) {
        kpt_mem.freelist = r->next;
    }

    release(&kpt_mem.lock);

    if ((r == NULL) && ((r = kmalloc (PT_ORDER)) == NULL)) {
        // panic("oom: kpt_alloc");
        // goto panic
    }

    memset(r, 0, PT_SZ);

    goto next((char*)r);
}

__code allocuvm_check_newszvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint oldsz, uint newsz, __code next(int ret, ...)){
    if (newsz >= UADDR_SZ) {
        goto next(0, ...);
    }

    if (newsz < oldsz) {
        ret = newsz;
        goto next(ret, ...);
    }

    char* mem;
    uint a = align_up(oldsz, PTE_SZ);

    goto allocuvm_loopvm_impl(vm_impl, pgdir, oldsz, newsz, mem, a, next(ret, ...));
}

__code allocuvm_loopvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint oldsz, uint newsz, char* mem, uint a, __code next(int ret, ...)){
    if (a < newsz) {
        mem = alloc_page();

        if (mem == 0) {
            cprintf("allocuvm out of memory\n");
            deallocuvm(pgdir, newsz, oldsz);
            goto next(0, ...);
        }

        memset(mem, 0, PTE_SZ);
        mappages(pgdir, (char*) a, PTE_SZ, v2p(mem), AP_KU);

        goto allocuvm_loopvm_impl(vm_impl, pgdir, oldsz, newsz, mem, a + PTE_SZ, next(ret, ...));
    }

    ret = newsz;
    goto next(ret, ...);
}

__code clearpteu_check_ptevm_impl(struct vm_impl* vm_impl, pde_t* pgdir, char* uva, __code next(int ret, ...)) {
    pte_t *pte;

    pte = walkpgdir(pgdir, uva, 0);
    if (pte == 0) {
        // panic("clearpteu");
        // goto panic;
    }

    // in ARM, we change the AP field ((ap & 0x3) << 4)
    *pte = (*pte & ~(0x03 << 4)) | AP_KO << 4;

    goto next(ret, ...);
}

__code copyuvm_check_nullvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, __code next(int ret, ...)) {
    pde_t *d;
    pte_t *pte;
    uint pa, i, ap;
    char *mem;

    // allocate a new first level page directory
    d = kpt_alloc();
    if (d == NULL) {
        ret = NULL;
        goto next(ret, ...);
    }
    i = 0;

    goto copyuvm_loopvm_impl(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
}

__code copyuvm_loopvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {
    if (i < sz) {
        goto copyuvm_loop_check_walkpgdir(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
    }

    ret = d;
    goto next(ret, ...);
}

__code copyuvm_loop_check_walkpgdir(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {
    if ((pte = walkpgdir(pgdir, (void *) i, 0)) == 0) {
        // panic("copyuvm: pte should exist");
        // goto panic();
    }

    goto copyuvm_loop_check_pte(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
}

__code copyuvm_loop_check_pte(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {
    if (!(*pte & PE_TYPES)) {
        // panic("copyuvm: page not present");
        // goto panic();
    }

    goto copyuvm_loop_check_mem(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
}

__code copyuvm_loop_check_mem(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {
    pa = PTE_ADDR(*pte);
    ap = PTE_AP(*pte);

    if ((mem = alloc_page()) == 0) {
        goto copyuvm_loop_bad(vm_impl, d, next(...));
    }

    goto copyuvm_loop_check_mappages(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
}

__code copyuvm_loop_check_mappages(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {
    memmove(mem, (char*) p2v(pa), PTE_SZ);

    if (mappages(d, (void*) i, PTE_SZ, v2p(mem), ap) < 0) {
        goto copyuvm_loop_bad(vm_impl, d, next(...));
    }

    goto copyuvm_loopvm_impl(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
}

__code copyuvm_loop_bad(struct vm_impl* vm_impl, pde_t* d, __code next(int ret, ...)) {
    freevm(d);
    ret = 0;
    goto next(ret, ...);
}

__code uva2ka_check_pe_types(struct vm_impl* vm, pde_t* pgdir, char* uva, __code next(int ret, ...)) {
    pte_t* pte;

    pte = walkpgdir(pgdir, uva, 0);

    // make sure it exists
    if ((*pte & PE_TYPES) == 0) {
        ret = 0;
        goto next(ret, ...);
    }

    goto uva2ka_check_pte_ap(vm, pgdir, uva, pte, next(...));
}

__code uva2ka_check_pte_ap(struct vm_impl* vm, pde_t* pgdir, char* uva, pte_t* pte, __code next(int ret, ...)) {
    // make sure it is a user page
    if (PTE_AP(*pte) != AP_KU) {
        ret = 0;
        goto next(ret, ...);
    }

    ret = (char*) p2v(PTE_ADDR(*pte));
    goto next(ret, ...);
}
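/*
 * For reference: the copyuvm_* code gears above split up the copy loop of the
 * original xv6 copyuvm(). A minimal sketch of that flat form, assuming the
 * same helpers (kpt_alloc, walkpgdir, alloc_page, mappages, freevm) as in the
 * xv6 ARM port, is kept under #if 0 purely as illustration; the *_flat_sketch
 * name is hypothetical and not part of this implementation.
 */
#if 0
static pde_t* copyuvm_flat_sketch (pde_t *pgdir, uint sz)
{
    pde_t *d;
    pte_t *pte;
    uint pa, i, ap;
    char *mem;

    // allocate a new first level page directory
    if ((d = kpt_alloc()) == NULL) {
        return NULL;
    }

    // copy the parent's address space one PTE_SZ page at a time
    for (i = 0; i < sz; i += PTE_SZ) {
        if ((pte = walkpgdir(pgdir, (void*) i, 0)) == 0) {
            panic("copyuvm: pte should exist");
        }
        if (!(*pte & PE_TYPES)) {
            panic("copyuvm: page not present");
        }

        pa = PTE_ADDR(*pte);
        ap = PTE_AP(*pte);

        if ((mem = alloc_page()) == 0) {
            goto bad;
        }

        memmove(mem, (char*) p2v(pa), PTE_SZ);

        if (mappages(d, (void*) i, PTE_SZ, v2p(mem), ap) < 0) {
            goto bad;
        }
    }

    return d;

bad:
    freevm(d);
    return 0;
}
#endif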
// flush all TLB
static void flush_tlb (void)
{
    uint val = 0;
    asm("MCR p15, 0, %[r], c8, c7, 0" : : [r]"r" (val):);

    // invalidate entire data and instruction cache
    asm("MCR p15, 0, %[r], c7, c10, 0" : : [r]"r" (val):);
    asm("MCR p15, 0, %[r], c7, c11, 0" : : [r]"r" (val):);
}

__code paging_intvmvm_impl(struct vm_impl* vm_impl, uint phy_low, uint phy_hi, __code next(...)) {
    mappages(P2V(&_kernel_pgtbl), P2V(phy_low), phy_hi - phy_low, phy_low, AP_KU);
    flush_tlb();

    goto next(...);
}

__code copyout_loopvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint va, void* pp, uint len, uint va0, char* pa0, __code next(int ret, ...)) {
    if (len > 0) {
        va0 = align_dn(va, PTE_SZ);
        pa0 = uva2ka(pgdir, (char*) va0);
        goto copyout_loop_check_pa0(vm_impl, pgdir, va, pp, len, va0, pa0, n, next(...));
    }

    ret = 0;
    goto next(ret, ...);
}

__code copyout_loop_check_pa0(struct vm_impl* vm_impl, pde_t* pgdir, uint va, void* pp, uint len, uint va0, char* pa0, uint n, __code next(int ret, ...)) {
    if (pa0 == 0) {
        ret = -1;
        goto next(ret, ...);
    }

    goto copyout_loop_check_n(vm_impl, pgdir, va, pp, len, va0, pa0, n, buf, next(...));
}

__code copyout_loop_check_n(struct vm_impl* vm_impl, pde_t* pgdir, uint va, void* pp, uint len, uint va0, char* pa0, uint n, char* buf, __code next(...)) {
    n = PTE_SZ - (va - va0);

    if (n > len) {
        n = len;
    }

    len -= n;
    buf += n;
    va = va0 + PTE_SZ;

    Gearef(cbc_context, vm_impl)->n = n;
    Gearef(cbc_context, vm_impl)->len = len;
    Gearef(cbc_context, vm_impl)->buf = buf;
    Gearef(cbc_context, vm_impl)->va = va;

    goto copyout_loopvm_impl(vm_impl, pgdir, va, pp, len, va0, pa0, next(...));
}

typedef struct proc proc_struct;

__code switchuvm_check_pgdirvm_impl(struct vm_impl* vm_impl, proc_struct* p, __code next(...)) { //:skip
    uint val;

    pushcli();

    if (p->pgdir == 0) {
        panic("switchuvm: no pgdir");
    }

    val = (uint) V2P(p->pgdir) | 0x00;

    asm("MCR p15, 0, %[v], c2, c0, 0" : : [v]"r" (val):);
    flush_tlb();

    popcli();

    goto next(...);
}

__code init_inituvm_check_sz(struct vm_impl* vm_impl, pde_t* pgdir, char* init, uint sz, __code next(...)) {
    char* mem;

    if (sz >= PTE_SZ) {
        // goto panic;
        // panic("inituvm: more than a page");
    }

    mem = alloc_page();
    memset(mem, 0, PTE_SZ);
    mappages(pgdir, 0, PTE_SZ, v2p(mem), AP_KU);
    memmove(mem, init, sz);

    goto next(...);
}
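/*
 * For reference: the copyout_loop* code gears earlier in this file correspond
 * to the copy loop of the original xv6 copyout(). A minimal sketch of that
 * flat form, assuming the same helpers (align_dn, uva2ka) as in the xv6 ARM
 * port, is kept under #if 0 purely as illustration; the *_flat_sketch name is
 * hypothetical and not part of this implementation.
 */
#if 0
static int copyout_flat_sketch (pde_t *pgdir, uint va, void *p, uint len)
{
    char *buf, *pa0;
    uint n, va0;

    buf = (char*) p;

    while (len > 0) {
        va0 = align_dn(va, PTE_SZ);
        pa0 = uva2ka(pgdir, (char*) va0);

        if (pa0 == 0) {
            return -1;
        }

        // copy at most up to the next page boundary, then advance
        n = PTE_SZ - (va - va0);

        if (n > len) {
            n = len;
        }

        memmove(pa0 + (va - va0), buf, n);

        len -= n;
        buf += n;
        va = va0 + PTE_SZ;
    }

    return 0;
}
#endif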