Mercurial > hg > Members > innparusu > xv6_rpi_port
comparison source/proc.c @ 0:c450faca55f4
Init
author | Tatsuki IHA <innparusu@cr.ie.u-ryukyu.ac.jp> |
---|---|
date | Sun, 22 Oct 2017 18:25:39 +0900 |
parents | |
children |
comparison
equal
deleted
inserted
replaced
-1:000000000000 | 0:c450faca55f4 |
---|---|
1 /***************************************************************** | |
2 * proc.c | |
3 * adapted from MIT xv6 by Zhiyi Huang, hzy@cs.otago.ac.nz | |
4 * University of Otago | |
5 * | |
6 ********************************************************************/ | |
7 | |
8 | |
9 | |
10 #include "types.h" | |
11 #include "defs.h" | |
12 #include "param.h" | |
13 #include "memlayout.h" | |
14 #include "mmu.h" | |
15 #include "arm.h" | |
16 #include "proc.h" | |
17 #include "spinlock.h" | |
18 | |
struct {
  struct spinlock lock;        // guards state/pid/parent/chan of every proc below
  struct proc proc[NPROC];     // fixed-size process table
} ptable;

static struct proc *initproc;  // first user process; adopts orphans in exit()

int first_sched = 1;           // true until scheduler()'s first pass; suppresses the initial sti()
int nextpid = 1;               // next pid to hand out; incremented under ptable.lock in allocproc()
extern void forkret(void);     // first code a new process runs (installed in allocproc)
extern void trapret(void);     // assembly trap-return path; forkret "returns" into it

static void wakeup1(void *chan);
// Initialize the process table. Called once at boot, before any
// process exists and before any other CPU touches ptable.
// Order matters: memset first, then initlock — reversing would
// zero the just-initialized lock.
void
pinit(void)
{
  memset(&ptable, 0, sizeof(ptable));
  initlock(&ptable.lock, "ptable");
}
40 | |
//PAGEBREAK: 32
// Look in the process table for an UNUSED proc.
// If found, change state to EMBRYO and initialize
// state required to run in the kernel: a kernel stack with a
// trap frame at the top and a context that starts in forkret.
// Returns the proc, or 0 if no slot or no memory is available.
static struct proc*
allocproc(void)
{
  struct proc *p;
  char *sp;

  acquire(&ptable.lock);
  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++)
    if(p->state == UNUSED)
      goto found;
  release(&ptable.lock);
  return 0;

found:
  // Claim the slot and assign the pid while still holding the lock,
  // so no other CPU can hand out the same entry or the same pid.
  p->state = EMBRYO;
  p->pid = nextpid++;
  release(&ptable.lock);

  // Allocate kernel stack.
  if((p->kstack = kalloc()) == 0){
    // NOTE(review): state is reset without ptable.lock. Benign only
    // if no other path inspects EMBRYO slots concurrently — confirm.
    p->state = UNUSED;
    return 0;
  }
  // NOTE(review): clears PGSIZE bytes but the stack top below is
  // computed with KSTACKSIZE — assumes KSTACKSIZE == PGSIZE
  // (check param.h); a mismatch would leave the top unzeroed.
  memset(p->kstack, 0, PGSIZE);
  sp = p->kstack + KSTACKSIZE;

  // Leave room for trap frame at the top of the kernel stack.
  sp -= sizeof *p->tf;
  p->tf = (struct trapframe*)sp;

  // Set up new context to start executing at forkret,
  // which returns to trapret.
  sp -= sizeof *p->context;
  p->context = (struct context*)sp;
  memset(p->context, 0, sizeof *p->context);
  p->context->pc = (uint)forkret;   // scheduler's swtch resumes here first
  p->context->lr = (uint)trapret;   // forkret's return lands in trapret

  return p;
}
87 | |
88 //PAGEBREAK: 32 | |
89 // Set up first user process. | |
90 void | |
91 userinit(void) | |
92 { | |
93 struct proc *p; | |
94 extern char _binary_initcode_start[], _binary_initcode_end[]; | |
95 uint _binary_initcode_size; | |
96 | |
97 _binary_initcode_size = (uint)_binary_initcode_end - (uint)_binary_initcode_start; | |
98 p = allocproc(); | |
99 //cprintf("after allocproc: initcode start: %x end %x\n", _binary_initcode_start, _binary_initcode_end); | |
100 initproc = p; | |
101 //cprintf("initproc is %x\n", initproc); | |
102 if((p->pgdir = setupkvm()) == 0) | |
103 panic("userinit: out of memory?"); | |
104 //cprintf("after setupkvm\n"); | |
105 inituvm(p->pgdir, _binary_initcode_start, _binary_initcode_size); | |
106 //cprintf("after initkvm\n"); | |
107 p->sz = PGSIZE; | |
108 memset(p->tf, 0, sizeof(*p->tf)); | |
109 p->tf->spsr = 0x10; | |
110 p->tf->sp = PGSIZE; | |
111 p->tf->pc = 0; // beginning of initcode.S | |
112 | |
113 safestrcpy(p->name, "initcode", sizeof(p->name)); | |
114 p->cwd = namei("/"); | |
115 | |
116 p->state = RUNNABLE; | |
117 } | |
118 | |
119 // Grow current process's memory by n bytes. | |
120 // Return 0 on success, -1 on failure. | |
121 int | |
122 growproc(int n) | |
123 { | |
124 uint sz; | |
125 | |
126 sz = curr_proc->sz; | |
127 if(n > 0){ | |
128 if((sz = allocuvm(curr_proc->pgdir, sz, sz + n)) == 0) | |
129 return -1; | |
130 } else if(n < 0){ | |
131 if((sz = deallocuvm(curr_proc->pgdir, sz, sz + n)) == 0) | |
132 return -1; | |
133 } | |
134 curr_proc->sz = sz; | |
135 switchuvm(curr_proc); | |
136 return 0; | |
137 } | |
138 | |
139 // Create a new process copying p as the parent. | |
140 // Sets up stack to return as if from system call. | |
141 // Caller must set state of returned proc to RUNNABLE. | |
142 int | |
143 fork(void) | |
144 { | |
145 int i, pid; | |
146 struct proc *np; | |
147 | |
148 // Allocate process. | |
149 if((np = allocproc()) == 0) | |
150 return -1; | |
151 | |
152 // Copy process state from p. | |
153 if((np->pgdir = copyuvm(curr_proc->pgdir, curr_proc->sz)) == 0){ | |
154 kfree(np->kstack); | |
155 np->kstack = 0; | |
156 np->state = UNUSED; | |
157 return -1; | |
158 } | |
159 np->sz = curr_proc->sz; | |
160 np->parent = curr_proc; | |
161 *np->tf = *curr_proc->tf; | |
162 | |
163 // Clear r0 so that fork returns 0 in the child. | |
164 np->tf->r0 = 0; | |
165 | |
166 for(i = 0; i < NOFILE; i++) | |
167 if(curr_proc->ofile[i]) | |
168 np->ofile[i] = filedup(curr_proc->ofile[i]); | |
169 np->cwd = idup(curr_proc->cwd); | |
170 | |
171 pid = np->pid; | |
172 np->state = RUNNABLE; | |
173 safestrcpy(np->name, curr_proc->name, sizeof(curr_proc->name)); | |
174 return pid; | |
175 } | |
176 | |
// Exit the current process. Does not return.
// An exited process remains in the zombie state
// until its parent calls wait() to find out it exited.
void
exit(void)
{
  struct proc *p;
  int fd;

  if(curr_proc == initproc)
    panic("init exiting");

  // Close all open files.
  for(fd = 0; fd < NOFILE; fd++){
    if(curr_proc->ofile[fd]){
      fileclose(curr_proc->ofile[fd]);
      curr_proc->ofile[fd] = 0;
    }
  }

  // Drop the reference to the working directory.
  iput(curr_proc->cwd);
  curr_proc->cwd = 0;

  // From here on we hold ptable.lock all the way into sched();
  // the scheduler releases it on our behalf.
  acquire(&ptable.lock);

  // Parent might be sleeping in wait().
  wakeup1(curr_proc->parent);

  // Pass abandoned children to init; if any are already zombies,
  // wake init so it can reap them.
  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
    if(p->parent == curr_proc){
      p->parent = initproc;
      if(p->state == ZOMBIE)
        wakeup1(initproc);
    }
  }

  // Jump into the scheduler, never to return.
  curr_proc->state = ZOMBIE;
  sched();
  panic("zombie exit");
}
219 | |
// Wait for a child process to exit and return its pid.
// Return -1 if this process has no children (or was killed).
int
wait(void)
{
  struct proc *p;
  int havekids, pid;

  acquire(&ptable.lock);
  for(;;){
    // Scan through table looking for zombie children.
    havekids = 0;
    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
      if(p->parent != curr_proc)
        continue;
      havekids = 1;
      if(p->state == ZOMBIE){
        // Found one. Reclaim its kernel stack and address space
        // and recycle its table slot.
        pid = p->pid;
        kfree(p->kstack);
        p->kstack = 0;
        freevm(p->pgdir);
        p->state = UNUSED;
        p->pid = 0;
        p->parent = 0;
        p->name[0] = 0;
        p->killed = 0;
        release(&ptable.lock);
        return pid;
      }
    }

    // No point waiting if we don't have any children.
    if(!havekids || curr_proc->killed){
      release(&ptable.lock);
      return -1;
    }

    // Wait for children to exit; exit() does wakeup1(parent).
    // Sleeping on curr_proc itself is the channel exit() uses.
    sleep(curr_proc, &ptable.lock); //DOC: wait-sleep
  }
}
262 | |
//PAGEBREAK: 42
// Per-CPU process scheduler.
// Each CPU calls scheduler() after setting itself up.
// Scheduler never returns. It loops, doing:
//  - choose a process to run
//  - swtch to start running that process
//  - eventually that process transfers control
//      via swtch back to the scheduler.
void
scheduler(void)
{
  struct proc *p;

  for(;;){
    // Enable interrupts on this processor — except on the very
    // first pass (first_sched), where sti() is skipped; presumably
    // interrupts must stay off until the first process has run.
    // NOTE(review): confirm why the port needs this first-pass quirk.
    if(first_sched) first_sched = 0;
    else sti();

    // Loop over process table looking for process to run.
    acquire(&ptable.lock);
    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
      if(p->state != RUNNABLE)
        continue;

      // Switch to chosen process. It is the process's job
      // to release ptable.lock and then reacquire it
      // before jumping back to us.
      curr_proc = p;
      switchuvm(p);          // install the process's page table
      p->state = RUNNING;

      swtch(&curr_cpu->scheduler, curr_proc->context);

      switchkvm();           // back to the kernel-only page table

      // Process is done running for now.
      // It should have changed its p->state before coming back.
      curr_proc = 0;
    }
    release(&ptable.lock);

  }
}
309 | |
// Enter scheduler. Must hold only ptable.lock
// and have changed proc->state. Saves and restores
// intena because intena is a property of this
// kernel thread, not this CPU.
void
sched(void)
{
  int intena;

  if(!holding(&ptable.lock))
    panic("sched ptable.lock");
  if(curr_cpu->ncli != 1)          // exactly one pushcli depth: only ptable.lock held
    panic("sched locks");
  if(curr_proc->state == RUNNING)  // caller must have moved us out of RUNNING
    panic("sched running");
  if(!(readcpsr()&PSR_DISABLE_IRQ))  // IRQs must be disabled across the switch
    panic("sched interruptible");
  intena = curr_cpu->intena;
  swtch(&curr_proc->context, curr_cpu->scheduler);
  curr_cpu->intena = intena;
}
329 | |
// Give up the CPU for one scheduling round.
// Takes ptable.lock so the state change and the switch into
// sched() are atomic with respect to other CPUs.
void
yield(void)
{
  acquire(&ptable.lock); //DOC: yieldlock
  curr_proc->state = RUNNABLE;
  sched();
  release(&ptable.lock);
}
339 | |
// A fork child's very first scheduling by scheduler()
// will swtch here. "Return" to user space.
void
forkret(void)
{
  static int first = 1;
  // Still holding ptable.lock from scheduler.
  release(&ptable.lock);

  if (first) {
    // Some initialization functions must be run in the context
    // of a regular process (e.g., they call sleep), and thus cannot
    // be run from main(). Done exactly once, by the first process.
    first = 0;
    initlog();
  }

  // Return to "caller", actually trapret (see allocproc),
  // which restores the trap frame and drops to user mode.
}
360 | |
// Atomically release lock lk and sleep on channel chan.
// Reacquires lk when awakened. Caller must hold lk.
void
sleep(void *chan, struct spinlock *lk)
{
  if(curr_proc == 0)
    panic("sleep");

  if(lk == 0)
    panic("sleep without lk");

  // Must acquire ptable.lock in order to
  // change p->state and then call sched.
  // Once we hold ptable.lock, we can be
  // guaranteed that we won't miss any wakeup
  // (wakeup runs with ptable.lock locked),
  // so it's okay to release lk.
  if(lk != &ptable.lock){ //DOC: sleeplock0
    acquire(&ptable.lock); //DOC: sleeplock1
    release(lk);
  }

  // Go to sleep; sched() switches away holding ptable.lock.
  curr_proc->chan = chan;
  curr_proc->state = SLEEPING;
  sched();

  // Tidy up once rescheduled.
  curr_proc->chan = 0;

  // Reacquire original lock (restore the caller's lock state).
  if(lk != &ptable.lock){ //DOC: sleeplock2
    release(&ptable.lock);
    acquire(lk);
  }
}
398 | |
399 //PAGEBREAK! | |
400 // Wake up all processes sleeping on chan. | |
401 // The ptable lock must be held. | |
402 static void | |
403 wakeup1(void *chan) | |
404 { | |
405 struct proc *p; | |
406 | |
407 for(p = ptable.proc; p < &ptable.proc[NPROC]; p++) | |
408 if(p->state == SLEEPING && p->chan == chan) | |
409 p->state = RUNNABLE; | |
410 } | |
411 | |
// Wake up all processes sleeping on chan.
// Public wrapper for wakeup1 that takes ptable.lock itself.
void
wakeup(void *chan)
{
  acquire(&ptable.lock);
  wakeup1(chan);
  release(&ptable.lock);
}
420 | |
421 // Kill the process with the given pid. | |
422 // Process won't exit until it returns | |
423 // to user space (see trap in trap.c). | |
424 int | |
425 kill(int pid) | |
426 { | |
427 struct proc *p; | |
428 | |
429 acquire(&ptable.lock); | |
430 for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){ | |
431 if(p->pid == pid){ | |
432 p->killed = 1; | |
433 // Wake process from sleep if necessary. | |
434 if(p->state == SLEEPING) | |
435 p->state = RUNNABLE; | |
436 release(&ptable.lock); | |
437 return 0; | |
438 } | |
439 } | |
440 release(&ptable.lock); | |
441 return -1; | |
442 } | |
443 | |
//PAGEBREAK: 36
// Print a process listing to console. For debugging.
// Runs when user types ^P on console.
// No lock to avoid wedging a stuck machine further.
// NOTE(review): deliberately left empty in this port — ^P is a no-op.
void
procdump(void)
{
}
452 | |
453 |