Commit c13a8c63 by xiaotong

Fix a bug in memory-pool usage under FREE_ON_THE_FLY mode

parent cb90f909
......@@ -45,20 +45,17 @@ void _Softmax(const XTensor * x, XTensor * y, int leadDim)
int * dimSize = new int[x->order - 1];
for(int i = 0; i < x->order; i++){
if(i < leadDim)
dimSize[i] = -x->dimSize[i];
dimSize[i] = x->dimSize[i];
else if(i > leadDim)
dimSize[i - 1] = -x->dimSize[i];
dimSize[i - 1] = x->dimSize[i];
}
XMem * mem = x->mem;
XTensor * max = NULL;
XTensor * sum = NULL;
max = NewTensor(x->order - 1, dimSize, x->dataType, x->denseRatio, x->devID, mem);
sum = NewTensor(x->order - 1, dimSize, x->dataType, x->denseRatio, x->devID, mem);
max->data = mem != NULL ? (char*)mem->AllocBuf(mem->devID, max->unitNum * max->unitSize) : XMemAlloc(max->devID, max->unitNum * max->unitSize);
sum->data = mem != NULL ? (char*)mem->AllocBuf(mem->devID, sum->unitNum * sum->unitSize) : XMemAlloc(sum->devID, sum->unitNum * sum->unitSize);
max = NewTensorBuf(x->order - 1, dimSize, x->dataType, x->denseRatio, x->devID, mem);
sum = NewTensorBuf(x->order - 1, dimSize, x->dataType, x->denseRatio, x->devID, mem);
_ReduceMax(x, max, leadDim);
_ReduceSum(x, sum, leadDim, max, 1.0F, true);
......@@ -114,18 +111,9 @@ void _Softmax(const XTensor * x, XTensor * y, int leadDim)
}
}
if(mem != NULL){
mem->ReleaseBuf(mem->devID, max->unitNum * max->unitSize);
mem->ReleaseBuf(mem->devID, sum->unitNum * sum->unitSize);
}
else{
XMemFree(max->devID, max->data);
XMemFree(sum->devID, sum->data);
max->data = NULL;
sum->data = NULL;
}
delete max;
delete sum;
DelTensorBuf(sum);
DelTensorBuf(max);
delete[] dimSize;
}
else
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论