Commit c8da72b4 by xiaotong

updates of locks

parent 7c53210c
@@ -125,6 +125,78 @@ void XLeader::InitForRun()
    }
}

/* mark the workers as LOCKED */
void XLeader::LockWorkers()
{
    int paramNum = serverModel.paramNum;

    /* job workers and collectors: reset the job counter and lock the count mutex */
    for (int i = 0; i < jworkers.count; i++) {
        XWorkerJob * worker = (XWorkerJob*)jworkers[i];
        worker->ResetJobCount();
        worker->LockCount();
    }

    for (int i = 0; i < cworkers.count; i++) {
        XWorkerCollect * worker = (XWorkerCollect*)cworkers[i];
        worker->ResetJobCount();
        worker->LockCount();
    }

    /* updaters and broadcasters expect one finished job per model parameter */
    for (int i = 0; i < uworkers.count; i++) {
        XWorkerUpdate * worker = (XWorkerUpdate*)uworkers[i];
        worker->ResetJobCount(paramNum);
        worker->LockCount();
    }

    for (int i = 0; i < bworkers.count; i++) {
        XWorkerBroadcast * worker = (XWorkerBroadcast*)bworkers[i];
        worker->ResetJobCount(paramNum);
        worker->LockCount();
    }
}

/*
wait for unlocked workers (i.e., until all workers finish their jobs).
It is the counterpart of LockWorkers(). It tries to lock the mutex of
each worker. Because the mutex was locked in LockWorkers(),
WaitForUnlockedWorkers() must wait until the worker finishes its job and
unlocks the mutex. In this way, WaitForUnlockedWorkers() returns only
when all workers have finished their jobs.
>> activeJobWorkers - flags that indicate whether each job worker is active
*/
void XLeader::WaitForUnlockedWorkers(const int * activeJobWorkers)
{
    int activeNum = 0;

    /* wait for each active job worker to finish and unlock its count mutex */
    for (int i = 0; i < jworkers.count; i++) {
        XWorkerJob * worker = (XWorkerJob*)jworkers[i];
        if (activeJobWorkers[i] > 0) {
            worker->LockCount();
            CheckNTErrors(worker->IsFinished(),
                          "Something is wrong with the worker!");
            worker->UnlockCount();
            activeNum++;
        }
    }

    XList workers;
    workers.AddList(&cworkers);
    workers.AddList(&uworkers);
    workers.AddList(&bworkers);

    for (int i = 0; i < workers.count; i++) {
        XWorker * worker = (XWorker*)workers[i];
        if (activeNum > 0) {
            /* block until the worker unlocks the mutex, i.e., finishes its jobs */
            worker->LockCount();
            CheckNTErrors(worker->IsFinished(),
                          "Something is wrong with the worker!");
            worker->UnlockCount();
        }
        else {
            /* no job worker was active, so there is nothing to wait for;
               just release the lock taken in LockWorkers() */
            worker->UnlockCount();
        }
    }
}
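
The pair LockWorkers() / WaitForUnlockedWorkers() uses each worker's count mutex as a completion signal rather than as a critical-section guard. Below is a minimal standalone sketch of that handshake (not code from this commit); C++20 std::binary_semaphore stands in for countMutex because, unlike a plain std::mutex, it may be released from a thread other than the one that acquired it.

    // sketch: a lock held by the leader is "unlocked" by the worker when done
    #include <cstdio>
    #include <semaphore>
    #include <thread>

    std::binary_semaphore countLock{1};    // plays the role of countMutex

    void Worker()
    {
        /* ... run the job ... */
        countLock.release();               // the worker unlocks when finished
    }

    int main()
    {
        countLock.acquire();               // leader: LockWorkers()
        std::thread t(Worker);
        countLock.acquire();               // leader: WaitForUnlockedWorkers(),
                                           // blocks until the worker releases
        std::printf("worker finished\n");
        countLock.release();
        t.join();
        return 0;
    }
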
/* get loss */
float XLeader::GetLoss()
{
@@ -278,6 +350,8 @@ bool XLeader::Run(XConfig * config, DataDistributeBase * dataDistributor,
    InitForRun();
    LockWorkers();

    for (int i = 0; i < jworkers.count; i++)
        active[i] = 0;
@@ -306,6 +380,9 @@ bool XLeader::Run(XConfig * config, DataDistributeBase * dataDistributor,
            /* job in queue 1: make a record of the run */
            worker->AddJobRecord();

            /* job in queue 1: finish the job */
            worker->AddJobCountFinished();

            active[i] = 1;
            activeJobCount++;
        }
@@ -338,6 +415,7 @@ bool XLeader::Run(XConfig * config, DataDistributeBase * dataDistributor,
        collecter->AddJobUpdateAll(&members, &membersAll, &serverModel,
                                   optimizer, updater, broadcaster);
        collecter->AddJobCollectOther(&memberRecords, &serverRecord);
        collecter->AddJobCountFinished();

        /* jobs in queue 2: collect the (gradient) data and other stuff. This
           is a reduce process. */
@@ -352,9 +430,11 @@ bool XLeader::Run(XConfig * config, DataDistributeBase * dataDistributor,
           not involved in this run. */
        //broadcaster->AddJobBroadcast(&serverModel, &membersAll);

        //WaitForFinishing();
    }
    WaitForUnlockedWorkers(active);

    for (int i = 0; i < jworkers.count; i++) {
        XWorkerJob * worker = (XWorkerJob*)jworkers[i];
        worker->Clear();
......
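
Pieced together from the hunks above, one training step of XLeader::Run() now has the following shape (the elided parts are summarized in comments, not quoted from the source):

    InitForRun();
    LockWorkers();                     // lock every worker's count mutex
    /* ... enqueue the jobs; each job worker's queue ends with
       AddJobCountFinished(), and the collect/update/broadcast queues end
       with a CountFinished job, so each worker can tick its counter ... */
    WaitForUnlockedWorkers(active);    // block until all active workers
                                       // have counted their expected jobs
    /* ... clear the job workers for the next step ... */
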
@@ -112,6 +112,12 @@ public:
    /* initialize the models for running them */
    void InitForRun();

    /* mark the workers as LOCKED */
    void LockWorkers();

    /* wait for unlocked workers (i.e., all workers finish their jobs) */
    void WaitForUnlockedWorkers(const int * activeJobWorkers);

    /* get loss */
    float GetLoss();
......
@@ -130,13 +130,31 @@ void XWorker::LockCount()
{
    MUTEX_LOCK(countMutex);
}

/* unlock the count mutex */
void XWorker::UnlockCount()
{
    MUTEX_UNLOCK(countMutex);
}

/* check if the worker has finished its job */
bool XWorker::IsFinished()
{
    if (jobCountExpected <= 0)
        return true;

    if (jobCountExpected == jobCountFinished)
        return true;

    return false;
}

/* count a finished job */
void XWorker::CountFinishedJob()
{
    jobCountFinished++;

    /* release the count mutex once all expected jobs have been counted
       (">=" rather than ">", so the unlock fires on the last job) */
    if (jobCountExpected < 0 || jobCountFinished >= jobCountExpected) {
        MUTEX_UNLOCK(countMutex);
    }
    else {
......
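
The change from ">" to ">=" in CountFinishedJob() is what lets the unlock fire exactly when the last expected job is counted; with ">", a worker given exactly jobCountExpected jobs would never release the mutex and the leader would wait forever. A compilable sketch of the countdown, using the same semaphore stand-in as above (the struct and its names are illustrative, not from the codebase):

    #include <atomic>
    #include <semaphore>

    struct CountSketch {
        int jobCountExpected = 0;              // set when the jobs are enqueued
        std::atomic<int> jobCountFinished{0};
        std::binary_semaphore countLock{1};    // plays the role of countMutex

        bool IsFinished() const {
            return jobCountExpected <= 0 ||
                   jobCountFinished.load() >= jobCountExpected;
        }

        void CountFinishedJob() {
            int done = ++jobCountFinished;
            /* ">=" releases on the last job; ">" would wait for a job
               that never comes */
            if (jobCountExpected < 0 || done >= jobCountExpected)
                countLock.release();           // wake the waiting leader
        }
    };
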
@@ -115,6 +115,12 @@ public:
    /* lock the count mutex */
    void LockCount();

    /* unlock the count mutex */
    void UnlockCount();

    /* check if the worker has finished its job */
    bool IsFinished();

    /* count a finished job */
    void CountFinishedJob();
......
@@ -191,10 +191,17 @@ bool XWorkerBroadcast::AddJobBroadcastSingle(XModel * source, XList * targetList
    args.AddList(targetList);
    args.AddInt(pid);

    XList argsNULL;
    argsNULL.Add(this);

    if (isInstantRun) {
        XWorkerBroadcast::BroadcastSingle(&args);
        XWorkerBroadcast::CountFinished(&argsNULL);
    }
    else {
        queue.EnqueueJob((void*)(char*)XWorkerBroadcast::BroadcastSingle, &args);
        queue.EnqueueJob((void*)(char*)XWorkerBroadcast::CountFinished, &argsNULL);
    }

    return true;
}
......
@@ -124,8 +124,9 @@ void XWorkerCollect::UpdateDataAll(XList * memberActive, XList * memberAll, XMod
           (in another thread) */
        if (finishedCount[j] == memberActive->count) {
            paramServer.flag = PARAM_STATE_COLLECTED;
            if (updater != NULL) {
                updater->AddJobUpdateSingle(server, memberAll, j, optimizer, broadcaster);
            }
        }
        else if (finishedCount[j] > memberActive->count) {
            ShowNTErrors("Something is wrong with finishedCount!");
......
@@ -189,10 +189,17 @@ bool XWorkerUpdate::AddJobUpdateSingle(XModel * model, XList * members, int pid,
    args.Add(optimizer);
    args.Add(broadcaster);

    XList argsNULL;
    argsNULL.Add(this);

    if (isInstantRun) {
        XWorkerUpdate::UpdateSingle(&args);
        XWorkerUpdate::CountFinished(&argsNULL);
    }
    else {
        queue.EnqueueJob((void*)(char*)XWorkerUpdate::UpdateSingle, &args);
        queue.EnqueueJob((void*)(char*)XWorkerUpdate::CountFinished, &argsNULL);
    }

    return true;
}
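
AddJobBroadcastSingle() and AddJobUpdateSingle() share the same pattern: the real job is enqueued first and a CountFinished job immediately after it, so the counter ticks only once the work itself has run. A generic sketch of that ordering, with a hypothetical queue type:

    #include <functional>
    #include <queue>

    // hypothetical job queue; jobs run in FIFO order on a worker thread
    std::queue<std::function<void()>> jobQueue;

    void AddJobWithCount(std::function<void()> job,
                         std::function<void()> countFinished)
    {
        jobQueue.push(std::move(job));             // the real work
        jobQueue.push(std::move(countFinished));   // ticks the counter after it
    }
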
......