changeset 1877:f73ca57cdc81 draft

add PPE wait semaphore to avoid PPE busy-wait
author Kohagura
date Tue, 31 Dec 2013 19:33:26 +0900
parents 2f04c761bf9f
children 11c70b35a83f
files TaskManager/Cell/CellTaskManagerImpl.cc TaskManager/Cell/CellTaskManagerImpl.h TaskManager/Gpu/GpuThreads.cc TaskManager/Gpu/GpuThreads.h TaskManager/kernel/ppe/CpuThreads.cc TaskManager/kernel/ppe/CpuThreads.h TaskManager/kernel/ppe/Sem.cc TaskManager/kernel/ppe/Sem.h TaskManager/kernel/ppe/SynchronizedMailManager.cc TaskManager/kernel/ppe/SynchronizedMailManager.h TaskManager/kernel/ppe/Threads.h TaskManager/kernel/schedule/DmaManager.h
diffstat 12 files changed, 60 insertions(+), 9 deletions(-)
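In outline, the change threads a single semaphore from the task manager down to every worker's mail queue: the PPE blocks on it instead of spinning through poll(), and each SynchronizedMailManager signals it whenever a worker sends mail. A rough sketch of the wiring, using names from the diff below (control flow simplified):

    // CellTaskManagerImpl::init()
    recive_wait = new Sem(0);                   // count 0: the first sem_p() blocks
    speThreads->set_mail_waiter(recive_wait);   // hand the semaphore to the workers

    // CpuThreads::set_mail_waiter() fans it out to every worker:
    //     args[i].scheduler->connector->set_mail_waiter(w);

    // SynchronizedMailManager::send(), after queueing the mail:
    //     if (ppe_waiter) ppe_waiter->sem_v1();    // wake the sleeping PPE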
--- a/TaskManager/Cell/CellTaskManagerImpl.cc	Mon Dec 30 20:16:36 2013 +0900
+++ b/TaskManager/Cell/CellTaskManagerImpl.cc	Tue Dec 31 19:33:26 2013 +0900
@@ -21,12 +21,14 @@
     delete speThreads;
     delete[] speTaskList;
     delete ppeManager;
+    delete recive_wait;
 }
 
 void CellTaskManagerImpl::init(int spuIdle_,int useRefDma,int export_task_log) {
     spe_running = 0;
     spuIdle = spuIdle_;
     int m = machineNum == 0?1:machineNum; // at least 1 tasklistinfo in -cpu 0
+    recive_wait = new Sem(0);
 
     // double buffer for the pipeline of Tasks to be executed
     speTaskList = new QueueInfo<TaskList>*[m]; // array of the Tasks running on the SPEs
@@ -63,6 +65,8 @@
     ppeManager->schedTaskManager = schedTaskManager;
 
     _export_task_log = export_task_log;
+
+    speThreads->set_mail_waiter(recive_wait);
 }
 
 void CellTaskManagerImpl::append_activeTask(HTaskPtr task) {
@@ -192,9 +196,13 @@
         // PPE side
         ppeManager->poll();
         // SPE side
-        do {
+        for (;;) {
             poll();
-        } while (ppeManager->activeTaskQueue->empty() && spe_running > 0);
+            if (ppeManager->activeTaskQueue->empty() && spe_running > 0)
+                recive_wait->sem_p();
+            else
+                break;
+        }
 
         if (spe_running < spu_limit) {
             debug_check_spe_idle(ppeManager->activeTaskQueue, spe_running);
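The loop above replaces the original busy-wait, in which the PPE spun through poll() while its own task queue was empty and SPEs were still running. The subtle case is mail arriving in the window between poll() returning nothing and sem_p() blocking; it is safe because sem_v1() leaves the count at 1, so the wakeup is sticky. A worst-case interleaving, traced under that assumption:

    // PPE: poll()                 -> finds no mail, task queue still empty
    // SPE: send(mail)             -> enqueue, ppe_waiter->sem_v1()   (count = 1)
    // PPE: recive_wait->sem_p()   -> returns immediately (count 1 -> 0)
    // PPE: poll()                 -> picks up the mail; no wakeup lost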
--- a/TaskManager/Cell/CellTaskManagerImpl.h	Mon Dec 30 20:16:36 2013 +0900
+++ b/TaskManager/Cell/CellTaskManagerImpl.h	Tue Dec 31 19:33:26 2013 +0900
@@ -3,6 +3,7 @@
 
 #include "TaskManagerImpl.h"
 #include "FifoTaskManagerImpl.h"
+#include "Sem.h"
 #ifdef __CERIUM_CELL__
 #include "SpeThreads.h"
 #else
@@ -20,6 +21,7 @@
     /* variables */
     int cpu_num;
     int id_offset;
+    SemPtr recive_wait;  // semaphore the PPE waits on for mail from the SPEs
 
     QueueInfo<TaskList> **taskListInfo;
     QueueInfo<TaskList> **speTaskList;  // running task
--- a/TaskManager/Gpu/GpuThreads.cc	Mon Dec 30 20:16:36 2013 +0900
+++ b/TaskManager/Gpu/GpuThreads.cc	Tue Dec 31 19:33:26 2013 +0900
@@ -34,7 +34,12 @@
     args->useRefDma = use_refdma;
 
     pthread_create(&threads[0], NULL, &gpu_thread_run, args);
+}
 
+void
+GpuThreads::set_mail_waiter(SemPtr w)
+{
+    args->scheduler->connector->set_mail_waiter(w);
 }
 
 void *
--- a/TaskManager/Gpu/GpuThreads.h	Mon Dec 30 20:16:36 2013 +0900
+++ b/TaskManager/Gpu/GpuThreads.h	Tue Dec 31 19:33:26 2013 +0900
@@ -24,6 +24,7 @@
 } gpu_thread_arg_t;
 
 class GpuThreads : public Threads {
+
  public:
     /*
       static GpuThreads* getInstance() {
@@ -36,6 +37,8 @@
     void init();
     static void *gpu_thread_run(void *args);
     virtual int spawn_task(int cpu_num, TaskListPtr p);
+    virtual void set_mail_waiter(SemPtr w);
+
     int get_mail(int speid, int count, memaddr *ret);
     int has_mail(int speid, int count, memaddr *ret);
     void send_mail(int speid, int num, memaddr *data);
--- a/TaskManager/kernel/ppe/CpuThreads.cc	Mon Dec 30 20:16:36 2013 +0900
+++ b/TaskManager/kernel/ppe/CpuThreads.cc	Tue Dec 31 19:33:26 2013 +0900
@@ -93,7 +93,7 @@
     wait->sem_p();
 #endif
 
-    for (int i = 0; i < cpu_num; i++) {
+    for (int i = 0; i < cpu_num+io_num; i++) {
         args[i].cpuid = i + id_offset;
         args[i].scheduler = new MainScheduler();
         args[i].wait = wait;
@@ -102,16 +102,23 @@
         args[i].cpu_num = cpu_num;
     }
 
-    for (int i = 0; i < cpu_num; i++) {
+    for (int i = 0; i < cpu_num+io_num; i++) {
         pthread_create(&threads[i], NULL,
                        &cpu_thread_run, (void*)&args[i]);
     }
 
-    for (int i = 0; i < cpu_num; i++) {
+    for (int i = 0; i < cpu_num+io_num; i++) {
         wait->sem_p();
     }
 }
 
+void
+CpuThreads::set_mail_waiter(SemPtr w) {
+    for (int i = 0; i < cpu_num+io_num; i++) {
+        args[i].scheduler->connector->set_mail_waiter(w);
+    }
+}
+
 int
 CpuThreads::spawn_task(int id, TaskListPtr p) {
     p->cpu = id - id_offset;
--- a/TaskManager/kernel/ppe/CpuThreads.h	Mon Dec 30 20:16:36 2013 +0900
+++ b/TaskManager/kernel/ppe/CpuThreads.h	Tue Dec 31 19:33:26 2013 +0900
@@ -38,8 +38,7 @@
     virtual void add_output_tasklist(int command, memaddr buff, int alloc_size);
     virtual int is_gpu(int cpuid);
     virtual int spawn_task(int cpu_num,TaskListPtr p);
-
-private:
+    virtual void set_mail_waiter(SemPtr w);
     /* variables */
     pthread_t *threads;
     cpu_thread_arg_t *args;
--- a/TaskManager/kernel/ppe/Sem.cc	Mon Dec 30 20:16:36 2013 +0900
+++ b/TaskManager/kernel/ppe/Sem.cc	Tue Dec 31 19:33:26 2013 +0900
@@ -51,6 +51,19 @@
 
 }
 
+void
+Sem::sem_v1()
+{
+	pthread_mutex_lock(&sem->mutex);
+	// set the count to 1 instead of incrementing it, so repeated
+	// notifications collapse into a single wakeup (binary semaphore)
+	sem->value = 1;
+
+	// signal that a resource has been released
+	pthread_cond_signal(&sem->cond);
+	pthread_mutex_unlock(&sem->mutex);
+}
+
 int
 Sem::count()
 {
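sem_v1() is a saturating variant of sem_v(): instead of incrementing the count it sets it to 1 under the mutex, so however many producers signal between two waits, the waiter wakes exactly once. For reference, a self-contained pthread sketch of the pair; the struct and function names here are illustrative, not the Cerium API:

    #include <pthread.h>

    struct BinSem {
        pthread_mutex_t mutex;
        pthread_cond_t  cond;
        int value;
    };

    void bin_p(BinSem *s) {                  // Sem::sem_p equivalent
        pthread_mutex_lock(&s->mutex);
        while (s->value == 0)                // re-check after every wakeup
            pthread_cond_wait(&s->cond, &s->mutex);
        s->value--;
        pthread_mutex_unlock(&s->mutex);
    }

    void bin_v1(BinSem *s) {                 // Sem::sem_v1 equivalent
        pthread_mutex_lock(&s->mutex);
        s->value = 1;                        // saturate: signals don't pile up
        pthread_cond_signal(&s->cond);
        pthread_mutex_unlock(&s->mutex);
    }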
--- a/TaskManager/kernel/ppe/Sem.h	Mon Dec 30 20:16:36 2013 +0900
+++ b/TaskManager/kernel/ppe/Sem.h	Tue Dec 31 19:33:26 2013 +0900
@@ -16,6 +16,7 @@
 	~Sem();
 	void sem_p();
 	void sem_v();
+	void sem_v1();
 	int  count();
 	/* variables */
 private:
--- a/TaskManager/kernel/ppe/SynchronizedMailManager.cc	Mon Dec 30 20:16:36 2013 +0900
+++ b/TaskManager/kernel/ppe/SynchronizedMailManager.cc	Tue Dec 31 19:33:26 2013 +0900
@@ -2,6 +2,11 @@
 #include "SynchronizedMailManager.h"
 
 void
+SynchronizedMailManager::set_waiter(SemPtr w) {
+    ppe_waiter = w;
+}
+
+void
 SynchronizedMailManager::calc_mask(unsigned int qsize)
 {
     mask = 1;
@@ -19,7 +24,7 @@
 
     queue_remain  = new Sem(size-1);			// remaining free slots in the queue
     queue_count	= new Sem(0);				// number of items currently in the queue
-
+    ppe_waiter = 0;  // no waiter registered: send() does not notify
 }
 
 SynchronizedMailManager::~SynchronizedMailManager()
@@ -45,6 +50,8 @@
     write &= mask;
 
     queue_count->sem_v();		// one more item in the queue
+    if (ppe_waiter)
+        ppe_waiter->sem_v1();  // notify the PPE that mail was sent
 }
 
 memaddr 
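Because every mail manager signals the same shared semaphore and sem_v1() saturates at 1, any number of concurrent senders leaves at most one pending wakeup, which is exactly what the PPE's polling loop consumes. A hypothetical usage sketch (mgr_a, mgr_b, d1, d2 are illustrative, not names from the source):

    SemPtr waiter = new Sem(0);
    mgr_a->set_waiter(waiter);
    mgr_b->set_waiter(waiter);
    mgr_a->send(d1);        // enqueue d1, sem_v1(): count becomes 1
    mgr_b->send(d2);        // enqueue d2, sem_v1(): count stays 1
    waiter->sem_p();        // the PPE wakes once; poll() then drains both queues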
--- a/TaskManager/kernel/ppe/SynchronizedMailManager.h	Mon Dec 30 20:16:36 2013 +0900
+++ b/TaskManager/kernel/ppe/SynchronizedMailManager.h	Tue Dec 31 19:33:26 2013 +0900
@@ -9,7 +9,7 @@
 class SynchronizedMailManager : public MailManager {
 public:
     /* constructor */
-    SynchronizedMailManager(unsigned int qsize = 32) ;
+    SynchronizedMailManager(unsigned int qsize = 32);
 
     ~SynchronizedMailManager();
 
@@ -17,12 +17,14 @@
     void send(memaddr data);
     memaddr recv();
     int count();
+    void set_waiter(SemPtr w);
 
 private:
     /* variables */
     memaddr *queue;
     SemPtr queue_remain;
     SemPtr queue_count;
+    SemPtr ppe_waiter;  // shared among all SynchronizedMailManager instances
     unsigned int size;
     unsigned int read;
     unsigned int write;
--- a/TaskManager/kernel/ppe/Threads.h	Mon Dec 30 20:16:36 2013 +0900
+++ b/TaskManager/kernel/ppe/Threads.h	Tue Dec 31 19:33:26 2013 +0900
@@ -6,6 +6,7 @@
 #include "types.h"
 #include "TaskList.h"
 
+class Sem;
 
 class Threads {
 public:
@@ -23,6 +24,7 @@
     virtual void add_output_tasklist(int command, memaddr buff, int alloc_size) = 0;
     virtual int is_gpu(int cpuid) { return 0; }
     virtual int spawn_task(int cpu_num, TaskListPtr p) = 0;
+    virtual void set_mail_waiter(Sem *w) = 0;
     /* variables */
     pthread_t *threads;
     int cpu_num;
--- a/TaskManager/kernel/schedule/DmaManager.h	Mon Dec 30 20:16:36 2013 +0900
+++ b/TaskManager/kernel/schedule/DmaManager.h	Tue Dec 31 19:33:26 2013 +0900
@@ -4,6 +4,7 @@
 #include "base.h"
 #include "ListData.h"
 #include "types.h"
+#include "Sem.h"
 
 enum dma_tag {
     DMA_READ  = 25,
@@ -41,6 +42,7 @@
     virtual void mail_write_finish_list(memaddr data) {}
     virtual memaddr mail_read() { return 0; }
     virtual memaddr task_list_mail_read() { return 0; }
+    virtual void set_mail_waiter(SemPtr w) {}
     
     // API for MFC list DMA transfer
     virtual void *dma_loadList(Scheduler *s, ListDataPtr list, uint32 mask) { return 0; }