diff src/spinlock.c @ 35:ad1d3b268e2d

update
author mir3636
date Fri, 22 Feb 2019 16:32:51 +0900
parents 83c23a36980d
children fb3e5a2f76c1
--- a/src/spinlock.c	Thu Feb 14 23:33:57 2019 +0900
+++ b/src/spinlock.c	Fri Feb 22 16:32:51 2019 +0900
@@ -46,7 +46,57 @@
 #endif
 }
 
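+// Acquire the lock, then hand control to the continuation next.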
+void cbc_acquire(struct spinlock *lk, __code (*next)(int ret))
+{
+    pushcli();		// disable interrupts to avoid deadlock.
+    lk->locked = 1;	// mark the lock as held (the atomic acquire below is disabled)
+
+#if 0	/* original xv6 acquire logic, currently disabled */
+    if(holding(lk))
+        panic("acquire");
+
+    // The xchg is atomic.
+    // It also serializes, so that reads after acquire are not
+    // reordered before it.
+    while(xchg(&lk->locked, 1) != 0)
+        ;
+
+    // Record info about lock acquisition for debugging.
+    lk->cpu = cpu;
+    getcallerpcs(get_fp(), lk->pcs);
+
+#endif
+    goto next();	// hand control to the caller-supplied continuation instead of returning
+}
+
 // Release the lock.
+void cbc_release(struct spinlock *lk, __code (*next)(int ret))
+{
+#if 0	/* original xv6 release logic, currently disabled */
+    if(!holding(lk))
+        panic("release");
+
+    lk->pcs[0] = 0;
+    lk->cpu = 0;
+
+    // The xchg serializes, so that reads before release are
+    // not reordered after it.  The 1996 PentiumPro manual (Volume 3,
+    // 7.2) says reads can be carried out speculatively and in
+    // any order, which implies we need to serialize here.
+    // But the 2007 Intel 64 Architecture Memory Ordering White
+    // Paper says that Intel 64 and IA-32 will not move a load
+    // after a store. So lock->locked = 0 would work here.
+    // The xchg being asm volatile ensures gcc emits it after
+    // the above assignments (and after the critical section).
+    xchg(&lk->locked, 0);
+#endif
+
+    lk->locked = 0; // clear the lock flag (the atomic release above is disabled)
+    popcli();
+    goto next();
+}
+
 void release(struct spinlock *lk)
 {
 #if 0
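
For readers unfamiliar with CbC (Continuation based C), the control flow in this patch is continuation passing: cbc_acquire and cbc_release do not return to their caller; goto next() transfers control to the code segment the caller passed in. Below is a rough plain-C analogue of that pattern (a sketch for illustration only, not part of this changeset; demo_acquire, demo_release, critical, and done are hypothetical names), modelling the continuation as an ordinary function pointer that is called in tail position:

    #include <stdio.h>

    struct spinlock { int locked; };

    /* Final continuation: runs after the lock is released. */
    static void done(struct spinlock *lk)
    {
        printf("after release, locked = %d\n", lk->locked);
    }

    /* Analogue of cbc_release: clear the flag, then invoke the continuation. */
    static void demo_release(struct spinlock *lk, void (*next)(struct spinlock *))
    {
        lk->locked = 0;   /* simplified, as in the patch: no atomic xchg */
        next(lk);         /* stand-in for CbC's goto next(); */
    }

    /* Critical section, entered as the continuation of demo_acquire. */
    static void critical(struct spinlock *lk)
    {
        printf("in critical section, locked = %d\n", lk->locked);
        demo_release(lk, done);
    }

    /* Analogue of cbc_acquire: set the flag, then invoke the continuation. */
    static void demo_acquire(struct spinlock *lk, void (*next)(struct spinlock *))
    {
        lk->locked = 1;
        next(lk);
    }

    int main(void)
    {
        struct spinlock lk = { 0 };
        demo_acquire(&lk, critical);
        return 0;
    }

The difference in CbC proper is that __code segments do not grow the call stack: goto next() compiles to a jump rather than a call, so the chain acquire -> critical section -> release runs in constant stack space instead of as nested calls.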