changeset 16:b35cb5777a5c

Add forgotten tex and src files
author tobaru
date Thu, 06 Feb 2020 18:40:10 +0900
parents b7ae3aa6548e
children 6afd90dba6db
files paper/history.tex paper/sources.tex paper/src/dummy paper/src/failure_example_userinit paper/src/vm.h paper/src/vm_all.c paper/src/vm_impl.cbc paper/src/vm_impl_private.h paper/src/vm_impl_private_all.cbc
diffstat 9 files changed, 1146 insertions(+), 0 deletions(-) [+]
line wrap: on
line diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/paper/history.tex	Thu Feb 06 18:40:10 2020 +0900
@@ -0,0 +1,6 @@
+\chapter*{Presentation History}
+\begin{itemize}
+\item 宮城 光希, 桃原 優, 河野真治. GearsOS のモジュール化と並列 API. 情報処理学会シ
+ステムソフトウェアとオペレーティング・システム研究会 (OS), May, 2018
+\item 桃原 優, 東恩納琢偉, 河野真治. GearsOS の Paging と Segmentation. 情報処理学会システムソフトウェアとオペレーティング・システム研究会 (OS), May, 2019
+\end{itemize}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/paper/sources.tex	Thu Feb 06 18:40:10 2020 +0900
@@ -0,0 +1,16 @@
+\chapter{Source Code Listings}
+
+Of the source code introduced in the main text, this chapter shows the code that, because of its length, could only be partially included there.
+
+\section{Implementation of the private methods of the interface}
+
+After vm.c, the original Xv6 code (Listing \ref{vm_c_all}), was defined as an interface, the functions containing if or for statements were split further on the implementation side; the resulting vm\_impl\_private.cbc is shown in Listing \ref{vm_impl_private}.
+
+
+
+
+
+\lstinputlisting[label=vm_c_all, caption={\footnotesize vm.c of Xv6}]{./src/vm_all.c}
+
+
+\lstinputlisting[label=vm_impl_private, caption={\footnotesize private part of the vm implementation}]{./src/vm_impl_private_all.cbc}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/paper/src/dummy	Thu Feb 06 18:40:10 2020 +0900
@@ -0,0 +1,37 @@
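+// dummy() bridges from the C function userinit() (below) into CbC: it replaces
+// the original inituvm() call with a goto to the cbc_init_vmm_dummy code gear.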
+void dummy(struct proc *p, char _binary_initcode_start[], char _binary_initcode_size[])
+{
+    // inituvm(p->pgdir, _binary_initcode_start, (int)_binary_initcode_size);
+    goto cbc_init_vmm_dummy(&p->cbc_context, p, p->pgdir, _binary_initcode_start, (int)_binary_initcode_size);
+
+}
+
+
+
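+// Stores the arguments for the vm interface in the context via Gearef and
+// transfers control to init_inituvm through the meta code gear.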
+__ncode cbc_init_vmm_dummy(struct Context* cbc_context, struct proc* p, pde_t* pgdir, char* init, uint sz){//:skip
+
+    struct vm* vm = createvm_impl(cbc_context);
+    // goto vm->init_vmm(vm, pgdir, init, sz , vm->void_ret);
+        Gearef(cbc_context, vm)->vm = (union Data*) vm;
+        Gearef(cbc_context, vm)->pgdir = pgdir;
+        Gearef(cbc_context, vm)->init = init;
+        Gearef(cbc_context, vm)->sz = sz ;
+        Gearef(cbc_context, vm)->next = C_vm_void_ret ;
+    goto meta(cbc_context, vm->init_inituvm);
+}
+
+
+void userinit(void)
+{
+    struct proc* p;
+    extern char _binary_initcode_start[], _binary_initcode_size[];
+
+    p = allocproc();
+    initContext(&p->cbc_context);
+
+    initproc = p;
+
+    if((p->pgdir = kpt_alloc()) == NULL) {
+        panic("userinit: out of memory?");
+    }
+
+    dummy(p, _binary_initcode_start, _binary_initcode_size);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/paper/src/failure_example_userinit	Thu Feb 06 18:40:10 2020 +0900
@@ -0,0 +1,19 @@
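+// Failure example: userinit() jumps to a code gear directly from the middle of
+// a normal C function.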
+void userinit(void) 
+{ 
+    struct proc* p; 
+    extern char _binary_initcode_start[], _binary_initcode_size[]; 
+ 
+    p = allocproc(); 
+    initContext(&p->cbc_context); 
+ 
+    initproc = p; 
+ 
+    if((p->pgdir = kpt_alloc()) == NULL) { 
+        panic("userinit: out of memory?"); 
+    } 
+ 
+    goto cbc_init_vmm_dummy(&p->cbc_context, p, p->pgdir, _binary_initcode_start, (int)_binary_initcode_size); 
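+    // NOTE: a CbC goto to a code gear transfers control and does not return,
+    // so the rest of userinit below this point is never executed.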
+    p->sz = PTE_SZ; 
+ 
+    // craft the trapframe as if 
+    memset(p->tf, 0, sizeof(*p->tf));
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/paper/src/vm.h	Thu Feb 06 18:40:10 2020 +0900
@@ -0,0 +1,34 @@
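+// vm interface: the fields below are the data gears passed between code gears,
+// followed by the code gear entries that correspond to the functions in vm.c.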
+typedef struct vm<Type,Impl> {
+    union Data* vm;
+    uint low;
+    uint hi;
+    struct proc* p;
+    pde_t* pgdir;
+    char* init;
+    uint sz;
+    char* addr;
+    struct inode* ip;
+    uint offset;
+    uint oldsz;
+    uint newsz;
+    char* uva;
+    uint va;
+    void* pp;
+    uint len;
+    uint phy_low;
+    uint phy_hi;
+    __code init_vmm(Impl* vm, __code next(...));
+    __code kpt_freerange(Impl* vm, uint low, uint hi, __code next(...));
+    __code kpt_alloc(Impl* vm ,__code next(...));
+    __code switchuvm(Impl* vm ,struct proc* p, __code next(...));
+    __code init_inituvm(Impl* vm, pde_t* pgdir, char* init, uint sz, __code next(...));
+    __code loaduvm(Impl* vm,pde_t* pgdir, char* addr, struct inode* ip, uint offset, uint sz,  __code next(...));
+    __code allocuvm(Impl* vm, pde_t* pgdir, uint oldsz, uint newsz, __code next(...));
+    __code clearpteu(Impl* vm, pde_t* pgdir, char* uva,  __code next(...));
+    __code copyuvm(Impl* vm, pde_t* pgdir, uint sz, __code next(...));
+    __code uva2ka(Impl* vm, pde_t* pgdir, char* uva, __code next(...));
+    __code copyout(Impl* vm, pde_t* pgdir, uint va, void* pp, uint len, __code next(...));
+    __code paging_int(Impl* vm, uint phy_low, uint phy_hi, __code next(...));
+    __code void_ret(Impl* vm);
+    __code next(...);
+} vm;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/paper/src/vm_all.c	Thu Feb 06 18:40:10 2020 +0900
@@ -0,0 +1,448 @@
+#include "param.h"
+#include "types.h"
+#include "defs.h"
+#include "arm.h"
+#include "memlayout.h"
+#include "mmu.h"
+#include "proc.h"
+#include "spinlock.h"
+#include "elf.h"
+
+extern char data[];  // defined by kernel.ld
+pde_t *kpgdir;  // for use in scheduler()
+
+// Xv6 can only allocate memory in 4KB blocks. This is fine
+// for x86. ARM's page tables and page directories (for 28-bit
+// user addresses) are only 1KB in size. kpt_alloc/free are used
+// as wrappers to support allocating page tables from the initial
+// kernel map during boot, and from the buddy memory allocator
+// at runtime.
+struct run {
+    struct run *next;
+};
+
+struct {
+    struct spinlock lock;
+    struct run *freelist;
+} kpt_mem;
+
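+// initialize the lock and the free list used for page-table pages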
+void init_vmm (void)
+{
+    initlock(&kpt_mem.lock, "vm");
+    kpt_mem.freelist = NULL;
+}
+
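+// put a page-table page back on the free list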
+static void _kpt_free (char *v)
+{
+    struct run *r;
+
+    r = (struct run*) v;
+    r->next = kpt_mem.freelist;
+    kpt_mem.freelist = r;
+}
+
+
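+// free a page-table page: pages above INIT_KERNMAP go back to the kernel
+// allocator (kfree), pages from the initial kernel map go back on the free list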
+static void kpt_free (char *v)
+{
+    if (v >= (char*)P2V(INIT_KERNMAP)) {
+        kfree(v, PT_ORDER);
+        return;
+    }
+    
+    acquire(&kpt_mem.lock);
+    _kpt_free (v);
+    release(&kpt_mem.lock);
+}
+
+// add some memory used for page tables (initialization code)
+void kpt_freerange (uint32 low, uint32 hi)
+{
+    while (low < hi) {
+        _kpt_free ((char*)low);
+        low += PT_SZ;
+    }
+}
+
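+// allocate a page-table page, falling back to kmalloc when the free list is empty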
+void* kpt_alloc (void)
+{
+    struct run *r;
+    
+    acquire(&kpt_mem.lock);
+    
+    if ((r = kpt_mem.freelist) != NULL ) {
+        kpt_mem.freelist = r->next;
+    }
+
+    release(&kpt_mem.lock);
+
+    // Allocate a PT page if no initial page is available
+    if ((r == NULL) && ((r = kmalloc (PT_ORDER)) == NULL)) {
+        panic("oom: kpt_alloc");
+    }
+
+    memset(r, 0, PT_SZ);
+    return (char*) r;
+}
+
+// Return the address of the PTE in page directory that corresponds to
+// virtual address va.  If alloc!=0, create any required page table pages.
+static pte_t* walkpgdir (pde_t *pgdir, const void *va, int alloc)
+{
+    pde_t *pde;
+    pte_t *pgtab;
+
+    // pgdir points to the page directory; get the page directory entry (pde)
+    pde = &pgdir[PDE_IDX(va)];
+
+    if (*pde & PE_TYPES) {
+        pgtab = (pte_t*) p2v(PT_ADDR(*pde));
+
+    } else {
+        if (!alloc || (pgtab = (pte_t*) kpt_alloc()) == 0) {
+            return 0;
+        }
+
+        // Make sure all those PTE_P bits are zero.
+        memset(pgtab, 0, PT_SZ);
+
+        // The permissions here are overly generous, but they can
+        // be further restricted by the permissions in the page table
+        // entries, if necessary.
+        *pde = v2p(pgtab) | UPDE_TYPE;
+    }
+
+    return &pgtab[PTE_IDX(va)];
+}
+
+// Create PTEs for virtual addresses starting at va that refer to
+// physical addresses starting at pa. va and size might not
+// be page-aligned.
+static int mappages (pde_t *pgdir, void *va, uint size, uint pa, int ap)
+{
+    char *a, *last;
+    pte_t *pte;
+
+    a = (char*) align_dn(va, PTE_SZ);
+    last = (char*) align_dn((uint)va + size - 1, PTE_SZ);
+
+    for (;;) {
+        if ((pte = walkpgdir(pgdir, a, 1)) == 0) {
+            return -1;
+        }
+
+        if (*pte & PE_TYPES) {
+            panic("remap");
+        }
+
+        *pte = pa | ((ap & 0x3) << 4) | PE_CACHE | PE_BUF | PTE_TYPE;
+
+        if (a == last) {
+            break;
+        }
+
+        a += PTE_SZ;
+        pa += PTE_SZ;
+    }
+
+    return 0;
+}
+
+// flush all TLB
+static void flush_tlb (void)
+{
+    uint val = 0;
+    asm("MCR p15, 0, %[r], c8, c7, 0" : :[r]"r" (val):);
+
+    // invalidate the entire data and instruction caches
+    asm ("MCR p15,0,%[r],c7,c10,0": :[r]"r" (val):);
+    asm ("MCR p15,0,%[r],c7,c11,0": :[r]"r" (val):);
+}
+
+// Switch to the user page table (TTBR0)
+void switchuvm (struct proc *p)
+{
+    uint val;
+
+    pushcli();
+
+    if (p->pgdir == 0) {
+        panic("switchuvm: no pgdir");
+    }
+
+    val = (uint) V2P(p->pgdir) | 0x00;
+
+    asm("MCR p15, 0, %[v], c2, c0, 0": :[v]"r" (val):);
+    flush_tlb();
+
+    popcli();
+}
+
+// Load the initcode into address 0 of pgdir. sz must be less than a page.
+void inituvm (pde_t *pgdir, char *init, uint sz)
+{
+    char *mem;
+
+    if (sz >= PTE_SZ) {
+        panic("inituvm: more than a page");
+    }
+
+    mem = alloc_page();
+    memset(mem, 0, PTE_SZ);
+    mappages(pgdir, 0, PTE_SZ, v2p(mem), AP_KU);
+    memmove(mem, init, sz);
+}
+
+// Load a program segment into pgdir.  addr must be page-aligned
+// and the pages from addr to addr+sz must already be mapped.
+int loaduvm (pde_t *pgdir, char *addr, struct inode *ip, uint offset, uint sz)
+{
+    uint i, pa, n;
+    pte_t *pte;
+
+    if ((uint) addr % PTE_SZ != 0) {
+        panic("loaduvm: addr must be page aligned");
+    }
+
+    for (i = 0; i < sz; i += PTE_SZ) {
+        if ((pte = walkpgdir(pgdir, addr + i, 0)) == 0) {
+            panic("loaduvm: address should exist");
+        }
+
+        pa = PTE_ADDR(*pte);
+
+        if (sz - i < PTE_SZ) {
+            n = sz - i;
+        } else {
+            n = PTE_SZ;
+        }
+
+        if (readi(ip, p2v(pa), offset + i, n) != n) {
+            return -1;
+        }
+    }
+
+    return 0;
+}
+
+// Allocate page tables and physical memory to grow process from oldsz to
+// newsz, which need not be page aligned.  Returns new size or 0 on error.
+int allocuvm (pde_t *pgdir, uint oldsz, uint newsz)
+{
+    char *mem;
+    uint a;
+
+    if (newsz >= UADDR_SZ) {
+        return 0;
+    }
+
+    if (newsz < oldsz) {
+        return oldsz;
+    }
+
+    a = align_up(oldsz, PTE_SZ);
+
+    for (; a < newsz; a += PTE_SZ) {
+        mem = alloc_page();
+
+        if (mem == 0) {
+            cprintf("allocuvm out of memory\n");
+            deallocuvm(pgdir, newsz, oldsz);
+            return 0;
+        }
+
+        memset(mem, 0, PTE_SZ);
+        mappages(pgdir, (char*) a, PTE_SZ, v2p(mem), AP_KU);
+    }
+
+    return newsz;
+}
+
+// Deallocate user pages to bring the process size from oldsz to
+// newsz.  oldsz and newsz need not be page-aligned, nor does newsz
+// need to be less than oldsz.  oldsz can be larger than the actual
+// process size.  Returns the new process size.
+int deallocuvm (pde_t *pgdir, uint oldsz, uint newsz)
+{
+    pte_t *pte;
+    uint a;
+    uint pa;
+
+    if (newsz >= oldsz) {
+        return oldsz;
+    }
+
+    for (a = align_up(newsz, PTE_SZ); a < oldsz; a += PTE_SZ) {
+        pte = walkpgdir(pgdir, (char*) a, 0);
+
+        if (!pte) {
+            // pte == 0 --> no page table for this entry
+            // round it up to the next page directory
+            a = align_up (a, PDE_SZ);
+
+        } else if ((*pte & PE_TYPES) != 0) {
+            pa = PTE_ADDR(*pte);
+
+            if (pa == 0) {
+                panic("deallocuvm");
+            }
+
+            free_page(p2v(pa));
+            *pte = 0;
+        }
+    }
+
+    return newsz;
+}
+
+// Free a page table and all the physical memory pages
+// in the user part.
+void freevm (pde_t *pgdir)
+{
+    uint i;
+    char *v;
+
+    if (pgdir == 0) {
+        panic("freevm: no pgdir");
+    }
+
+    // release the user space memory, but not the page tables
+    deallocuvm(pgdir, UADDR_SZ, 0);
+
+    // release the page tables
+    for (i = 0; i < NUM_UPDE; i++) {
+        if (pgdir[i] & PE_TYPES) {
+            v = p2v(PT_ADDR(pgdir[i]));
+            kpt_free(v);
+        }
+    }
+
+    kpt_free((char*) pgdir);
+}
+
+// Clear PTE_U on a page. Used to create an inaccessible page beneath
+// the user stack (to trap stack underflow).
+void clearpteu (pde_t *pgdir, char *uva)
+{
+    pte_t *pte;
+
+    pte = walkpgdir(pgdir, uva, 0);
+    if (pte == 0) {
+        panic("clearpteu");
+    }
+
+    // in ARM, we change the AP field (ap & 0x3) << 4)
+    *pte = (*pte & ~(0x03 << 4)) | AP_KO << 4;
+}
+
+// Given a parent process's page table, create a copy
+// of it for a child.
+pde_t* copyuvm (pde_t *pgdir, uint sz)
+{
+    pde_t *d;
+    pte_t *pte;
+    uint pa, i, ap;
+    char *mem;
+
+    // allocate a new first level page directory
+    d = kpt_alloc();
+    if (d == NULL ) {
+        return NULL ;
+    }
+
+    // copy the whole address space over (no COW)
+    for (i = 0; i < sz; i += PTE_SZ) {
+        if ((pte = walkpgdir(pgdir, (void *) i, 0)) == 0) {
+            panic("copyuvm: pte should exist");
+        }
+
+        if (!(*pte & PE_TYPES)) {
+            panic("copyuvm: page not present");
+        }
+
+        pa = PTE_ADDR (*pte);
+        ap = PTE_AP (*pte);
+
+        if ((mem = alloc_page()) == 0) {
+            goto bad;
+        }
+
+        memmove(mem, (char*) p2v(pa), PTE_SZ);
+
+        if (mappages(d, (void*) i, PTE_SZ, v2p(mem), ap) < 0) {
+            goto bad;
+        }
+    }
+    return d;
+
+bad: freevm(d);
+    return 0;
+}
+
+//PAGEBREAK!
+// Map user virtual address to kernel address.
+char* uva2ka (pde_t *pgdir, char *uva)
+{
+    pte_t *pte;
+
+    pte = walkpgdir(pgdir, uva, 0);
+
+    // make sure it exists
+    if ((*pte & PE_TYPES) == 0) {
+        return 0;
+    }
+
+    // make sure it is a user page
+    if (PTE_AP(*pte) != AP_KU) {
+        return 0;
+    }
+
+    return (char*) p2v(PTE_ADDR(*pte));
+}
+
+// Copy len bytes from p to user address va in page table pgdir.
+// Most useful when pgdir is not the current page table.
+// uva2ka ensures this only works for user pages.
+int copyout (pde_t *pgdir, uint va, void *p, uint len)
+{
+    char *buf, *pa0;
+    uint n, va0;
+
+    buf = (char*) p;
+
+    while (len > 0) {
+        va0 = align_dn(va, PTE_SZ);
+        pa0 = uva2ka(pgdir, (char*) va0);
+
+        if (pa0 == 0) {
+            return -1;
+        }
+
+        n = PTE_SZ - (va - va0);
+
+        if (n > len) {
+            n = len;
+        }
+
+        memmove(pa0 + (va - va0), buf, n);
+
+        len -= n;
+        buf += n;
+        va = va0 + PTE_SZ;
+    }
+
+    return 0;
+}
+
+
+// 1:1 map the memory [phy_low, phy_hi] in the kernel. We need to
+// use 2-level mapping for this block of memory. Rumor has it that
+// ARMv6's small brain cannot handle memory that is mapped by both
+// a 1-level and a 2-level page table. For the initial kernel we
+// use 1MB mappings; other memory needs to be mapped as 4KB pages.
+void paging_init (uint phy_low, uint phy_hi)
+{
+    mappages (P2V(&_kernel_pgtbl), P2V(phy_low), phy_hi - phy_low, phy_low, AP_KU);
+    flush_tlb ();
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/paper/src/vm_impl.cbc	Thu Feb 06 18:40:10 2020 +0900
@@ -0,0 +1,140 @@
+#include "../../context.h"
+#interface "vm.h"
+
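+// Create the vm interface and its vm_impl implementation, binding every
+// interface entry to the corresponding code gear (the C_... identifiers).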
+vm* createvm_impl(struct Context* cbc_context) {
+    struct vm* vm  = new vm();
+    struct vm_impl* vm_impl = new vm_impl();
+    vm->vm = (union Data*)vm_impl;
+    vm_impl->vm_impl = NULL;
+    vm_impl->i  = 0;
+    vm_impl->pte = NULL;
+    vm_impl->sz  = 0;
+    vm_impl->loaduvm_ptesize_check = C_loaduvm_ptesize_checkvm_impl;
+    vm_impl->loaduvm_loop = C_loaduvm_loopvm_impl;
+    vm_impl->allocuvm_check_newsz = C_allocuvm_check_newszvm_impl;
+    vm_impl->allocuvm_loop = C_allocuvm_loopvm_impl;
+    vm_impl->copyuvm_check_null = C_copyuvm_check_nullvm_impl;
+    vm_impl->copyuvm_loop = C_copyuvm_loopvm_impl;
+    vm_impl->uva2ka_check_pe_types = C_uva2ka_check_pe_types;
+    vm_impl->paging_intvm_impl = C_paging_intvmvm_impl;
+    vm_impl->copyout_loopvm_impl = C_copyout_loopvm_impl;
+    vm_impl->switchuvm_check_pgdirvm_impl = C_switchuvm_check_pgdirvm_impl;
+    vm_impl->init_inituvm_check_sz = C_init_inituvm_check_sz;
+    vm->void_ret  = C_vm_void_ret;
+    vm->init_vmm = C_init_vmmvm_impl;
+    vm->kpt_freerange = C_kpt_freerangevm_impl;
+    vm->kpt_alloc = C_kpt_allocvm_impl;
+    vm->switchuvm = C_switchuvmvm_impl;
+    vm->init_inituvm = C_init_inituvmvm_impl;
+    vm->loaduvm = C_loaduvmvm_impl;
+    vm->allocuvm = C_allocuvmvm_impl;
+    vm->clearpteu = C_clearpteuvm_impl;
+    vm->copyuvm = C_copyuvmvm_impl;
+    vm->uva2ka = C_uva2kavm_impl;
+    vm->copyout = C_copyoutvm_impl;
+    vm->paging_int = C_paging_intvm_impl;
+    return vm;
+}
+extern struct {
+    struct spinlock lock;
+    struct run *freelist;
+} kpt_mem;
+
+__code init_vmmvm_impl(struct vm_impl* vm,__code next(...)) {
+    initlock(&kpt_mem.lock, "vm");
+    kpt_mem.freelist = NULL;
+
+    goto next(...);
+}
+
+extern struct run {
+    struct run *next;
+};
+
+static void _kpt_free (char *v)
+{
+    struct run *r;
+
+    r = (struct run*) v;
+    r->next = kpt_mem.freelist;
+    kpt_mem.freelist = r;
+}
+
+__code kpt_freerangevm_impl(struct vm_impl* vm, uint low, uint hi, __code next(...)) {
+
+   if (low < hi) {
+     _kpt_free((char*)low);
+     goto kpt_freerangevm_impl(vm, low + PT_SZ, hi, next(...));
+
+   }
+  goto next(...);
+}
+
+__code kpt_allocvm_impl(struct vm_impl* vm, __code next(...)) {
+  acquire(&kpt_mem.lock);
+
+  goto kpt_alloc_check_impl(vm, next(...));
+}
+
+typedef struct proc proc;
+__code switchuvmvm_impl(struct vm_impl* vm , struct proc* p, __code next(...)) { //:skip
+
+    goto switchuvm_check_pgdirvm_impl(...);
+}
+
+__code init_inituvmvm_impl(struct vm_impl* vm, pde_t* pgdir, char* init, uint sz, __code next(...)) {
+
+    Gearef(cbc_context, vm_impl)->pgdir = pgdir;
+    Gearef(cbc_context, vm_impl)->init = init;
+    Gearef(cbc_context, vm_impl)->sz = sz;
+    Gearef(cbc_context, vm_impl)->next = next;
+    goto init_inituvm_check_sz(vm, pgdir, init, sz, next(...));
+}
+
+__code loaduvmvm_impl(struct vm_impl* vm, pde_t* pgdir, char* addr, struct inode* ip, uint offset, uint sz,  __code next(...)) {
+    Gearef(cbc_context, vm_impl)->pgdir = pgdir;
+    Gearef(cbc_context, vm_impl)->addr = addr;
+    Gearef(cbc_context, vm_impl)->ip = ip;
+    Gearef(cbc_context, vm_impl)->offset = offset;
+    Gearef(cbc_context, vm_impl)->sz = sz;
+    Gearef(cbc_context, vm_impl)->next = next;
+
+    goto loaduvm_ptesize_checkvm_impl(vm, next(...));
+}
+
+__code allocuvmvm_impl(struct vm_impl* vm, pde_t* pgdir, uint oldsz, uint newsz, __code next(...)) {
+
+    goto allocuvm_check_newszvm_impl(vm, pgdir, oldsz, newsz, next(...));
+}
+
+__code clearpteuvm_impl(struct vm_impl* vm, pde_t* pgdir, char* uva,  __code next(...)) {
+
+    goto clearpteu_check_ptevm_impl(vm, pgdir, uva, next(...));
+}
+
+__code copyuvmvm_impl(struct vm_impl* vm, pde_t* pgdir, uint sz, __code next(...)) {
+
+    goto copyuvm_check_nullvm_impl(vm, pgdir, sz, next(...));
+}
+
+__code uva2kavm_impl(struct vm_impl* vm, pde_t* pgdir, char* uva, __code next(...)) {
+
+    goto uva2ka_check_pe_types(vm, pgdir, uva, next(...));
+}
+
+__code copyoutvm_impl(struct vm_impl* vm, pde_t* pgdir, uint va, void* pp, uint len, __code next(...)) {
+
+    vm->buf = (char*) pp;
+
+    goto copyout_loopvm_impl(vm, pgdir, va, pp, len, va0, pa0, next(...));
+}
+
+__code paging_intvm_impl(struct vm_impl* vm, uint phy_low, uint phy_hi, __code next(...)) {
+
+    goto paging_intvmvm_impl(vm, phy_low, phy_hi, next(...));
+}
+
+__code vm_void_ret(struct vm_impl* vm) {
+    return;
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/paper/src/vm_impl_private.h	Thu Feb 06 18:40:10 2020 +0900
@@ -0,0 +1,46 @@
+typedef struct vm_impl<Impl, Isa> impl vm{
+    union Data* vm_impl;
+    uint i;
+    pte_t* pte;
+    uint sz;
+    pde_t* pgdir;
+    char* addr;
+    struct inode* ip;
+    uint offset;
+    uint pa;
+    uint n;
+    uint oldsz;
+    uint newsz;
+    uint a;
+    int ret;
+    char* mem;
+    char* uva;
+    pde_t* d;
+    uint ap;
+    uint phy_low;
+    uint phy_hi;
+    uint va;
+    void* pp;
+    uint len;
+    char* buf;
+    char* pa0;
+    uint va0;
+    proc_struct* p;
+    char* init;
+
+    __code kpt_alloc_check_impl(Type* vm_impl, __code next(...));
+    __code loaduvm_ptesize_check(Type* vm_impl, __code next(int ret, ...));
+    __code loaduvm_loop(Type* vm_impl, uint i, pte_t* pte, uint sz, __code next(int ret, ...));
+    __code allocuvm_check_newsz(Type* vm_impl, pde_t* pgdir, uint oldsz, uint newsz, __code next(...));
+    __code allocuvm_loop(Type* vm_impl, pde_t* pgdir, uint oldsz, uint newsz, uint a, __code next(...));
+    __code copyuvm_check_null(Type* vm_impl, pde_t* pgdir, uint sz, __code next(...));
+    __code copyuvm_loop(Type* vm_impl,pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...));
+    __code clearpteu_check_ptevm_impl(Type* vm_impl, pde_t* pgdir, char* uva,  __code next(...));
+    __code uva2ka_check_pe_types(Type* vm_impl, pde_t* pgdir, char* uva, __code next(...));
+    __code paging_intvm_impl(Type* vm_impl, uint phy_low, uint phy_hi, __code next(...));
+    __code copyout_loopvm_impl(Type* vm_impl, pde_t* pgdir, uint va, void* pp, uint len, __code next(...));
+    __code switchuvm_check_pgdirvm_impl(struct vm_impl* vm_impl, struct proc* p, __code next(...));
+    __code init_inituvm_check_sz(struct vm_impl* vm_impl, pde_t* pgdir, char* init, uint sz, __code next(...));
+    __code void_ret(Type* vm_impl);
+    __code next(...);
+} vm_impl;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/paper/src/vm_impl_private_all.cbc	Thu Feb 06 18:40:10 2020 +0900
@@ -0,0 +1,400 @@
+#include "param.h"
+#include "proc.h"
+#include "mmu.h"
+#include "defs.h"
+#include "memlayout.h"
+#interface "vm_impl.h"
+
+/*
+vm_impl* createvm_impl2(); //:skip
+*/
+
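+// loaduvm is split into code gears: the page-alignment check below, the loop
+// head, the walkpgdir check, the PTE_SZ/readi step, and the exit. Intermediate
+// values are carried in the vm_impl data gear.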
+__code loaduvm_ptesize_checkvm_impl(struct vm_impl* vm_impl, __code next(int ret, ...)) {
+    char* addr = vm_impl->addr;
+
+    if ((uint) addr % PTE_SZ != 0) {
+       // goto panic 
+    }
+
+    goto loaduvm_loopvm_impl(vm_impl, next(ret, ...));
+}
+
+__code loaduvm_loopvm_impl(struct vm_impl* vm_impl, __code next(int ret, ...)) {
+    uint i = vm_impl->i;
+    uint sz = vm_impl->sz;
+
+    if (i < sz) {
+        goto loaduvm_check_pgdir(vm_impl, next(ret, ...));  
+    } 
+
+    goto loaduvm_exit(vm_impl, next(ret, ...));
+}
+
+
+static pte_t* walkpgdir (pde_t *pgdir, const void *va, int alloc)
+{
+    pde_t *pde;
+    pte_t *pgtab;
+
+    // pgdir points to the page directory; get the page directory entry (pde)
+    pde = &pgdir[PDE_IDX(va)];
+
+    if (*pde & PE_TYPES) {
+        pgtab = (pte_t*) p2v(PT_ADDR(*pde));
+
+    } else {
+        if (!alloc || (pgtab = (pte_t*) kpt_alloc()) == 0) {
+            return 0;
+        }
+
+        // Make sure all those PTE_P bits are zero.
+        memset(pgtab, 0, PT_SZ);
+
+        // The permissions here are overly generous, but they can
+        // be further restricted by the permissions in the page table
+        // entries, if necessary.
+        *pde = v2p(pgtab) | UPDE_TYPE;
+    }
+
+    return &pgtab[PTE_IDX(va)];
+}
+
+
+__code loaduvm_check_pgdir(struct vm_impl* vm_impl, __code next(int ret, ...)) {
+    pte_t* pte = vm_impl->pte;
+    pde_t* pgdir = vm_impl->pgdir;
+    uint i = vm_impl->i;
+    char* addr = vm_impl->addr;
+    uint pa = vm_impl->pa;
+
+    if ((pte = walkpgdir(pgdir, addr + i, 0)) == 0) {
+        // goto panic
+    } 
+    pa = PTE_ADDR(*pte);
+
+    vm_impl->pte = pte; 
+    vm_impl->pgdir = pgdir; 
+    vm_impl->addr = addr; 
+    vm_impl->pa = pa; 
+
+    goto loaduvm_check_PTE_SZ(vm_impl, next(ret, ...));
+}
+
+__code loaduvm_check_PTE_SZ(struct vm_impl* vm_impl, __code next(int ret, ...)) {
+    uint sz = vm_impl->sz;
+    uint i = vm_impl->i;
+    uint n = vm_impl->n;
+    struct inode* ip = vm_impl->ip;
+    uint pa = vm_impl->pa;
+    uint offset = vm_impl->offset;
+    
+    if (sz - i < PTE_SZ) {
+        n = sz - i;
+    } else {
+        n = PTE_SZ;
+    }
+
+    if (readi(ip, p2v(pa), offset + i, n) != n) {
+        ret = -1;
+        goto next(ret, ...);
+    }
+
+    vm_impl->n = n;
+ 
+    goto loaduvm_loopvm_impl(vm_impl, next(ret, ...));
+}
+
+__code loaduvm_exit(struct vm_impl* vm_impl, __code next(int ret, ...)) {
+    ret = 0;
+    goto next(ret, ...);
+}
+
+struct run {
+    struct run *next;
+};
+
+struct {
+    struct spinlock lock;
+    struct run* freelist;
+} kpt_mem;
+
+
+static int mappages (pde_t *pgdir, void *va, uint size, uint pa, int ap)
+{
+    char *a, *last;
+    pte_t *pte;
+
+    a = (char*) align_dn(va, PTE_SZ);
+    last = (char*) align_dn((uint)va + size - 1, PTE_SZ);
+
+    for (;;) {
+        if ((pte = walkpgdir(pgdir, a, 1)) == 0) {
+            return -1;
+        }
+
+        if (*pte & PE_TYPES) {
+            panic("remap");
+        }
+
+        *pte = pa | ((ap & 0x3) << 4) | PE_CACHE | PE_BUF | PTE_TYPE;
+
+        if (a == last) {
+            break;
+        }
+
+        a += PTE_SZ;
+        pa += PTE_SZ;
+    }
+
+    return 0;
+}
+
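+// Continuation of kpt_alloc: the kpt_mem lock was taken in kpt_allocvm_impl
+// (vm_impl.cbc); take a page from the free list or fall back to kmalloc.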
+__code kpt_alloc_check_impl(struct vm_impl* vm_impl, __code next(...)) { 
+    struct run* r;    
+    if ((r = kpt_mem.freelist) != NULL ) {
+        kpt_mem.freelist = r->next;
+    }
+    release(&kpt_mem.lock);
+
+    if ((r == NULL) && ((r = kmalloc (PT_ORDER)) == NULL)) {
+        // panic("oom: kpt_alloc");
+        // goto panic
+    }
+
+    memset(r, 0, PT_SZ);
+    goto next((char*)r);
+}
+
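+// allocuvm entry: a newsz above UADDR_SZ returns 0, shrinking is handled
+// without allocating; otherwise the loop below allocates and maps pages.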
+__code allocuvm_check_newszvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint oldsz, uint newsz, __code next(int ret, ...)){
+    if (newsz >= UADDR_SZ) {
+       goto next(0, ...);
+    }
+
+    if (newsz < oldsz) {
+       ret = newsz;
+       goto next(ret, ...);
+    }
+
+    char* mem;
+    uint a = align_up(oldsz, PTE_SZ);
+
+    goto allocuvm_loopvm_impl(vm_impl, pgdir, oldsz, newsz, mem, a, next(ret, ...));
+}
+
+__code allocuvm_loopvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint oldsz, uint newsz, char* mem, uint a, __code next(int ret, ...)){
+
+    if (a < newsz) {
+        mem = alloc_page();
+
+        if (mem == 0) {
+            cprintf("allocuvm out of memory\n");
+            deallocuvm(pgdir, newsz, oldsz);
+            goto next(0, ...);
+        }
+
+        memset(mem, 0, PTE_SZ);
+        mappages(pgdir, (char*) a, PTE_SZ, v2p(mem), AP_KU);
+
+        goto allocuvm_loopvm_impl(vm_impl, pgdir, oldsz, newsz, mem, a + PTE_SZ, next(ret, ...));
+    }
+    ret = newsz; 
+    goto next(ret, ...);
+}
+
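+// clearpteu: clear the user-access AP bits of the PTE for uva (used to make
+// the guard page beneath the user stack inaccessible)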
+__code clearpteu_check_ptevm_impl(struct vm_impl* vm_impl, pde_t* pgdir, char* uva, __code next(int ret, ...)) {
+    pte_t *pte;
+
+    pte = walkpgdir(pgdir, uva, 0);
+    if (pte == 0) {
+        // panic("clearpteu");
+        // goto panic;
+    }
+
+    // in ARM, we change the AP field (ap & 0x3) << 4)
+    *pte = (*pte & ~(0x03 << 4)) | AP_KO << 4;
+
+    goto next(ret, ...);
+}
+
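+// copyuvm: allocate a fresh page directory, then copy the parent's address
+// space page by page (no copy-on-write) in the loop gears below.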
+__code copyuvm_check_nullvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, __code next(int ret, ...)) {
+    pde_t *d;
+    pte_t *pte;
+    uint pa, i, ap;
+    char *mem;
+
+    // allocate a new first level page directory
+    d = kpt_alloc();
+    if (d == NULL ) {
+        ret = NULL;
+        goto next(ret, ...);
+    }
+    i = 0;
+
+    goto copyuvm_loopvm_impl(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
+}
+
+__code copyuvm_loopvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {
+
+    if (i < sz) { 
+        goto copyuvm_loop_check_walkpgdir(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
+
+    }
+    ret = d;
+    goto next(ret, ...);
+}
+
+__code copyuvm_loop_check_walkpgdir(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {
+        if ((pte = walkpgdir(pgdir, (void *) i, 0)) == 0) {
+            // panic("copyuvm: pte should exist");
+            // goto panic();
+        }
+    goto copyuvm_loop_check_pte(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
+}
+
+__code copyuvm_loop_check_pte(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {
+
+        if (!(*pte & PE_TYPES)) {
+            // panic("copyuvm: page not present");
+            // goto panic();
+        }
+
+    goto copyuvm_loop_check_mem(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
+}
+
+__code copyuvm_loop_check_mem(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {
+    pa = PTE_ADDR (*pte);
+    ap = PTE_AP (*pte);
+
+    if ((mem = alloc_page()) == 0) {
+        goto copyuvm_loop_bad(vm_impl, d, next(...));
+    }
+    goto copyuvm_loop_check_mappages(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
+    
+}
+
+__code copyuvm_loop_check_mappages(struct vm_impl* vm_impl, pde_t* pgdir, uint sz, pde_t* d, pte_t* pte, uint pa, uint i, uint ap, char* mem, __code next(int ret, ...)) {
+
+    memmove(mem, (char*) p2v(pa), PTE_SZ);
+
+    if (mappages(d, (void*) i, PTE_SZ, v2p(mem), ap) < 0) {
+        goto copyuvm_loop_bad(vm_impl, d, next(...));
+    }
+    goto copyuvm_loopvm_impl(vm_impl, pgdir, sz, d, pte, pa, i, ap, mem, next(ret, ...));
+ 
+}
+
+__code copyuvm_loop_bad(struct vm_impl* vm_impl, pde_t* d, __code next(int ret, ...)) {
+    freevm(d);
+    ret = 0;
+    goto next(ret, ...);
+}
+
+
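+// uva2ka: look up the PTE for uva and return its kernel virtual address,
+// or 0 if the page is missing or not a user page.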
+__code uva2ka_check_pe_types(struct vm_impl* vm, pde_t* pgdir, char* uva, __code next(int ret, ...)) {
+    pte_t* pte;
+
+    pte = walkpgdir(pgdir, uva, 0);
+
+    // make sure it exists
+    if ((*pte & PE_TYPES) == 0) {
+        ret = 0;
+        goto next(ret, ...);
+    }
+    goto uva2ka_check_pte_ap(vm, pgdir, uva, pte, next(...));
+}
+
+__code uva2ka_check_pte_ap(struct vm_impl* vm, pde_t* pgdir, char* uva, pte_t* pte, __code next(int ret, ...)) {
+    // make sure it is a user page
+    if (PTE_AP(*pte) != AP_KU) {
+        ret = 0;
+        goto next(ret, ...);
+    }
+    ret = (char*) p2v(PTE_ADDR(*pte));
+    goto next(ret, ...);
+}
+
+// flush all TLB
+static void flush_tlb (void)
+{
+    uint val = 0;
+    asm("MCR p15, 0, %[r], c8, c7, 0" : :[r]"r" (val):);
+
+    // invalidate the entire data and instruction caches
+    asm ("MCR p15,0,%[r],c7,c10,0": :[r]"r" (val):);
+    asm ("MCR p15,0,%[r],c7,c11,0": :[r]"r" (val):);
+}
+
+__code paging_intvmvm_impl(struct vm_impl* vm_impl, uint phy_low, uint phy_hi, __code next(...)) {
+    mappages (P2V(&_kernel_pgtbl), P2V(phy_low), phy_hi - phy_low, phy_low, AP_KU);
+    flush_tlb ();
+
+    goto next(...);
+}
+
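+// copyout as a chain of code gears: align va down to a page, translate it
+// with uva2ka, then advance by at most PTE_SZ bytes per iteration.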
+__code copyout_loopvm_impl(struct vm_impl* vm_impl, pde_t* pgdir, uint va, void* pp, uint len, uint va0, char* pa0,  __code next(int ret, ...)) {
+    if (len > 0) {
+        va0 = align_dn(va, PTE_SZ);
+        pa0 = uva2ka(pgdir, (char*) va0);
+        goto copyout_loop_check_pa0(vm_impl, pgdir, va, pp, len, va0, pa0, n, next(...));
+    }
+    ret = 0;
+    goto next(ret, ...);
+
+}
+
+__code copyout_loop_check_pa0(struct vm_impl* vm_impl, pde_t* pgdir, uint va, void* pp, uint len, uint va0, char* pa0, uint n, __code next(int ret, ...)) {
+    if (pa0 == 0) {
+        ret = -1;
+        goto next(ret, ...);
+    }
+    goto copyout_loop_check_n(vm_impl, pgdir, va, pp, len, va0, pa0, n, buf, next(...));
+}
+__code copyout_loop_check_n(struct vm_impl* vm_impl, pde_t* pgdir, uint va, void* pp, uint len, uint va0, char* pa0, uint n, char* buf, __code next(...)) {
+    n = PTE_SZ - (va - va0);
+
+    if (n > len) {
+        n = len;
+    }
+
+    len -= n;
+    buf += n;
+    va = va0 + PTE_SZ;
+    goto copyout_loopvm_impl(vm_impl, pgdir, va, pp, len, va0, pa0, next(...));
+}
+
+typedef struct proc proc_struct;
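+// switchuvm body: load the user page directory into TTBR0 and flush the TLB.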
+__code switchuvm_check_pgdirvm_impl(struct vm_impl* vm_impl, proc_struct* p, __code next(...)) { //:skip
+    uint val;
+
+    pushcli();
+
+    if (p->pgdir == 0) {
+        panic("switchuvm: no pgdir");
+    }
+
+    val = (uint) V2P(p->pgdir) | 0x00;
+
+    asm("MCR p15, 0, %[v], c2, c0, 0": :[v]"r" (val):);
+    flush_tlb();
+
+    popcli();
+
+    goto next(...);
+}
+
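+// inituvm body: map one zeroed page at user virtual address 0 and copy the
+// initcode into it; an initcode larger than a page is an error.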
+__code init_inituvm_check_sz(struct vm_impl* vm_impl, pde_t* pgdir, char* init, uint sz, __code next(...)) {
+    char* mem;
+
+    if (sz >= PTE_SZ) {
+        // goto panic;
+        // panic("inituvm: more than a page");
+    }
+
+    mem = alloc_page();
+    memset(mem, 0, PTE_SZ);
+    mappages(pgdir, 0, PTE_SZ, v2p(mem), AP_KU);
+    memmove(mem, init, sz);
+
+    goto next(...);
+}
+