Commit 174ed699 by xiaotong

Fix memory leaks: release XLink buffers in the destructor, consolidate XTensor member initialization into a shared Init() helper, and pass the correct device id when constructing temporary tensors.

parent 8cae67f2
...@@ -42,12 +42,14 @@ void SmallTest(); ...@@ -42,12 +42,14 @@ void SmallTest();
int main( int argc, const char ** argv ) int main( int argc, const char ** argv )
{ {
//_CrtSetBreakAlloc(78); //_CrtSetBreakAlloc(123);
/* a tiny test */ /* a tiny test */
if(true) if(false)
SmallTest(); SmallTest();
return 0;
//_CrtDumpMemoryLeaks();
//return 0;
if(argc > 1 && !strcmp(argv[1], "-test")) if(argc > 1 && !strcmp(argv[1], "-test"))
Test(); Test();
......
...@@ -42,8 +42,10 @@ XLink::XLink() ...@@ -42,8 +42,10 @@ XLink::XLink()
/* de-constructor: release the buffers owned by this link */
XLink::~XLink()
{
    /* deleting a null pointer is a well-defined no-op in C++,
       so no explicit null checks are needed before the deletes */
    delete[] tails;

    /* params is deleted through a char* cast — presumably it was
       allocated as a raw char buffer; confirm against the allocation site */
    delete[] (char*)params;
}
/* reset it */ /* reset it */
...@@ -311,18 +313,16 @@ void XLink::Replace(const XTensor * oldOne, XTensor * newOne) ...@@ -311,18 +313,16 @@ void XLink::Replace(const XTensor * oldOne, XTensor * newOne)
XLink &newIncome = newOne->income; XLink &newIncome = newOne->income;
XLink &newOutgo = newOne->outgo; XLink &newOutgo = newOne->outgo;
int incomeTailNum = newIncome.tailNum;
int outgoTailNum = newOutgo.tailNum;
XLink::ClearOutgoing(newOne); XLink::ClearOutgoing(newOne);
XLink::ClearIncoming(newOne); XLink::ClearIncoming(newOne);
if(incomeTailNum < oldOne->income.tailNum){ if(newIncome.tailNum < oldOne->income.tailNum){
delete[] newIncome.tails; delete[] newIncome.tails;
newIncome.tails = new XTensor*[newIncome.tailNum]; newIncome.tails = new XTensor*[oldOne->income.tailNum];
} }
/* incoming nodes for the new node */ /* incoming nodes */
newIncome.SetType(oldOne->income.typeID); newIncome.SetType(oldOne->income.typeID);
newIncome.head = newOne; newIncome.head = newOne;
newIncome.tailNum = oldOne->income.tailNum; newIncome.tailNum = oldOne->income.tailNum;
...@@ -346,12 +346,12 @@ void XLink::Replace(const XTensor * oldOne, XTensor * newOne) ...@@ -346,12 +346,12 @@ void XLink::Replace(const XTensor * oldOne, XTensor * newOne)
} }
} }
if(outgoTailNum < oldOne->outgo.tailNum){ if(newOutgo.tailNum < oldOne->outgo.tailNum){
delete[] newOutgo.tails; delete[] newOutgo.tails;
newOutgo.tails = new XTensor*[newOutgo.tailNum]; newOutgo.tails = new XTensor*[oldOne->outgo.tailNum];
} }
/* outgoing nodes for the new node */ /* outgoing nodes */
newOutgo.head = newOne; newOutgo.head = newOne;
newOutgo.tailNum = oldOne->outgo.tailNum; newOutgo.tailNum = oldOne->outgo.tailNum;
memcpy(newOutgo.tails, oldOne->outgo.tails, sizeof(XTensor*) * newOutgo.tailNum); memcpy(newOutgo.tails, oldOne->outgo.tails, sizeof(XTensor*) * newOutgo.tailNum);
......
...@@ -83,24 +83,10 @@ constructor ...@@ -83,24 +83,10 @@ constructor
*/ */
XTensor::XTensor() XTensor::XTensor()
{ {
memset(this, 0, sizeof(XTensor)); Init();
SetDataPointer(); SetDataPointer();
id = MakeTensorID(); id = MakeTensorID();
order = -1;
memset(dimSize, 0, sizeof(int) * MAX_TENSOR_DIM_NUM);
memset(dimSizeRDI, 0, sizeof(int) * MAX_TENSOR_DIM_NUM);
memset(isAllValued, 0, sizeof(bool) * MAX_TENSOR_DIM_NUM);
dataType = DEFAULT_DTYPE;
devID = -1;
unitSize = sizeof(float);
unitNum = 0;
unitNumNonZero = 0;
isSparse = false;
isShared = false;
denseRatio = 1.0F;
isDefaultDType = true; isDefaultDType = true;
isInGlobalMem = false; isInGlobalMem = false;
isInit = false; isInit = false;
...@@ -110,16 +96,9 @@ XTensor::XTensor() ...@@ -110,16 +96,9 @@ XTensor::XTensor()
/*
constructor
build a tensor initialized from the settings of a reference tensor
>> reference - the tensor whose settings (shape, type, device) are copied
*/
XTensor::XTensor(const XTensor * reference)
{
    /* put every member into a known empty state before anything else */
    Init();
    SetDataPointer();
    id = MakeTensorID();

    InitTensor(this, reference);
}
...@@ -134,29 +113,13 @@ XTensor::XTensor(const int myOrder, int myDevID, XMem * myMem) ...@@ -134,29 +113,13 @@ XTensor::XTensor(const int myOrder, int myDevID, XMem * myMem)
{ {
CheckNTErrors((myOrder > 0), "Illegal tensor order1"); CheckNTErrors((myOrder > 0), "Illegal tensor order1");
Init();
SetDataPointer(); SetDataPointer();
id = MakeTensorID(); id = MakeTensorID();
order = myOrder; order = myOrder;
memset(dimSize, 0, sizeof(int) * MAX_TENSOR_DIM_NUM);
memset(dimSizeRDI, 0, sizeof(int) * MAX_TENSOR_DIM_NUM);
memset(isAllValued, 0, sizeof(bool) * MAX_TENSOR_DIM_NUM);
mem = myMem; mem = myMem;
data = NULL;
dataHost = NULL;
dataType = DEFAULT_DTYPE;
devID = myMem == NULL ? myDevID : myMem->devID; devID = myMem == NULL ? myDevID : myMem->devID;
unitSize = sizeof(float);
unitNum = 0;
unitNumNonZero = 0;
isSparse = false;
isShared = false;
denseRatio = 1.0F;
isDefaultDType = true;
isInGlobalMem = false;
isInit = false;
isTmp = false;
} }
/* /*
...@@ -173,25 +136,13 @@ XTensor::XTensor(const int myOrder, const int * myDimSize, const TENSOR_DATA_TYP ...@@ -173,25 +136,13 @@ XTensor::XTensor(const int myOrder, const int * myDimSize, const TENSOR_DATA_TYP
{ {
CheckNTErrors((myOrder > 0), "Illegal tensor order1"); CheckNTErrors((myOrder > 0), "Illegal tensor order1");
Init();
SetDataPointer(); SetDataPointer();
id = MakeTensorID(); id = MakeTensorID();
order = myOrder; order = myOrder;
memset(dimSize, 0, sizeof(int) * MAX_TENSOR_DIM_NUM);
memset(dimSizeRDI, 0, sizeof(int) * MAX_TENSOR_DIM_NUM);
memset(isAllValued, 0, sizeof(bool) * MAX_TENSOR_DIM_NUM);
mem = myMem; mem = myMem;
devID = myMem != NULL ? myMem->devID : myDevID; devID = myMem != NULL ? myMem->devID : myDevID;
data = NULL;
dataHost = NULL;
dataType = DEFAULT_DTYPE;
devID = myMem == NULL ? -1 : myMem->devID;
isShared = false;
isDefaultDType = true;
isInGlobalMem = false;
isInit = false;
isTmp = false;
Resize(myOrder, myDimSize, myDataType, myDenseRatio); Resize(myOrder, myDimSize, myDataType, myDenseRatio);
} }
...@@ -199,6 +150,7 @@ XTensor::XTensor(const int myOrder, const int * myDimSize, const TENSOR_DATA_TYP ...@@ -199,6 +150,7 @@ XTensor::XTensor(const int myOrder, const int * myDimSize, const TENSOR_DATA_TYP
/* copy constructor */ /* copy constructor */
XTensor::XTensor(const XTensor &reference) XTensor::XTensor(const XTensor &reference)
{ {
Init();
SetDataPointer(); SetDataPointer();
id = MakeTensorID(); id = MakeTensorID();
ShallowCopy(reference); ShallowCopy(reference);
...@@ -248,6 +200,7 @@ XTensor::~XTensor() ...@@ -248,6 +200,7 @@ XTensor::~XTensor()
dims[0] = -dims[0]; dims[0] = -dims[0];
XTensor * newTensor = new XTensor(order, dims, dataType, denseRatio, devID, mem); XTensor * newTensor = new XTensor(order, dims, dataType, denseRatio, devID, mem);
newTensor->SetTMP();
newTensor->data = data; newTensor->data = data;
data = NULL; data = NULL;
...@@ -260,6 +213,32 @@ XTensor::~XTensor() ...@@ -260,6 +213,32 @@ XTensor::~XTensor()
DestroyData(); DestroyData();
} }
/* initialize every member variable to a safe "empty tensor" state;
   called at the top of each constructor so no field is left uninitialized */
void XTensor::Init()
{
    /* identity and ownership */
    id = -1;
    mem = NULL;          /* fixed: original had a stray double semicolon here */

    /* data pointers */
    data = NULL;
    dataHost = NULL;
    dataP = NULL;

    /* placement and shape */
    devID = -1;
    order = -1;
    memset(dimSize, 0, sizeof(int) * MAX_TENSOR_DIM_NUM);
    memset(dimSizeRDI, 0, sizeof(int) * MAX_TENSOR_DIM_NUM);

    /* unit bookkeeping */
    dataType = DEFAULT_DTYPE;
    unitSize = sizeof(float);
    unitNum = 0;
    unitNumNonZero = 0;

    /* sparsity and flags */
    isSparse = false;
    denseRatio = 1.0F;
    isShared = false;
    isDefaultDType = true;
    isInGlobalMem = false;
    memset(isAllValued, 0, sizeof(bool) * MAX_TENSOR_DIM_NUM);
    isInit = false;
    isTmp = false;
}
/* delete data arrays */ /* delete data arrays */
void XTensor::DestroyData() void XTensor::DestroyData()
{ {
......
...@@ -175,6 +175,9 @@ public: ...@@ -175,6 +175,9 @@ public:
/* de-constructor */ /* de-constructor */
~XTensor(); ~XTensor();
/* initialize member variables */
void Init();
/* delete data arrays */ /* delete data arrays */
void DestroyData(); void DestroyData();
......
...@@ -433,7 +433,7 @@ void CudaTopK(XTensor * a, XTensor * b, XTensor * index, int dim, int k) ...@@ -433,7 +433,7 @@ void CudaTopK(XTensor * a, XTensor * b, XTensor * index, int dim, int k)
int dimSize[MAX_TENSOR_DIM_NUM]; int dimSize[MAX_TENSOR_DIM_NUM];
memcpy(dimSize, a->dimSize, sizeof(int) * a->order); memcpy(dimSize, a->dimSize, sizeof(int) * a->order);
dimSize[0] = -dimSize[0]; dimSize[0] = -dimSize[0];
XTensor * indexA = new XTensor(a->order, dimSize, X_INT, 1.0F, a->mem); XTensor * indexA = new XTensor(a->order, dimSize, X_INT, 1.0F, a->devID, a->mem);
indexA->data = a->mem != NULL ? a->mem->AllocBuf(a->devID, a->unitNum * sizeof(int)) : XMemAlloc(a->devID, a->unitNum * sizeof(int)); indexA->data = a->mem != NULL ? a->mem->AllocBuf(a->devID, a->unitNum * sizeof(int)) : XMemAlloc(a->devID, a->unitNum * sizeof(int));
/* make the index tensor */ /* make the index tensor */
......
...@@ -282,7 +282,7 @@ void CudaSoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x, ...@@ -282,7 +282,7 @@ void CudaSoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
XTensor * ytmp = NewTensor(y, false); XTensor * ytmp = NewTensor(y, false);
/* make a matrix to keep \beta */ /* make a matrix to keep \beta */
XTensor * beta = new XTensor(y->order - 1, dimSize, y->dataType, y->denseRatio, mem); XTensor * beta = new XTensor(y->order - 1, dimSize, y->dataType, y->denseRatio, y->devID, mem);
ytmp->data = mem->AllocBuf(mem->devID, y->unitNum * y->unitSize); ytmp->data = mem->AllocBuf(mem->devID, y->unitNum * y->unitSize);
beta->data = mem->AllocBuf(mem->devID, beta->unitNum * beta->unitSize); beta->data = mem->AllocBuf(mem->devID, beta->unitNum * beta->unitSize);
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论