Commit 85ee1664 by xiaotong

fix memory leak

parent 7655f198
@@ -32,25 +32,27 @@
 #include "test/Test.h"
+//#define CRTDBG_MAP_ALLOC
+//#include <stdlib.h>
+//#include <crtdbg.h>
 using namespace nts;
 using namespace samplefnnlm;
-//#define CRTDBG_MAP_ALLOC
-//#include <stdlib.h>
-//#include <crtdbg.h>
 int main( int argc, const char ** argv )
 {
+    srand((unsigned)time(0));
     if(argc > 1 && !strcmp(argv[1], "-test"))
         Test();
-    if(argc > 1 && !strcmp(argv[1], "-fnnlm"))
-        return FNNLMMain(argc - 1, argv + 1);
+    else if(argc > 1 && !strcmp(argv[1], "-fnnlm"))
+        FNNLMMain(argc - 1, argv + 1);
     else{
         fprintf(stderr, "Thanks for using NiuTrans.Tensor! This is a library that eases the\n");
         fprintf(stderr, "use of tensors. All you need is to ... \n\n");
         fprintf(stderr, "Run this program with \"-test\" for unit test!\n");
         fprintf(stderr, "Or run this program with \"-fnnlm\" for sample FNNLM!\n");
     }
+    //_CrtDumpMemoryLeaks();
     return 0;
 }
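Aside: the commented-out CRTDBG lines and the trailing //_CrtDumpMemoryLeaks(); call are the usual hooks for the MSVC debug-heap leak checker. A minimal sketch of how that machinery is typically enabled on Windows Debug builds (independent of this repository; note that the CRT macro is spelled _CRTDBG_MAP_ALLOC, with a leading underscore):

```cpp
// Sketch: MSVC CRT debug-heap leak detection (Windows, Debug builds only).
// _CRTDBG_MAP_ALLOC maps malloc/free to their debug versions so the final
// report can show the file and line of each leaked C allocation.
#define _CRTDBG_MAP_ALLOC
#include <stdlib.h>
#include <crtdbg.h>

int main()
{
    char * leaked = (char*)malloc(64);   // intentionally never freed, so the report is non-empty
    (void)leaked;

    // Dumps every block still allocated on the debug heap to the debugger output window.
    _CrtDumpMemoryLeaks();
    return 0;
}
```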
@@ -94,9 +94,9 @@ void MatrixMul(XTensor * a, MATRIX_TRANS_TYPE transposedA,
     XList * aList = new XList(10);
     XList * bList = new XList(10);
     XList * cList = new XList(10);
-    int aDimSize[2] = { a->dimSizeRDI[1], a->dimSizeRDI[0] };
-    int bDimSize[2] = { b->dimSizeRDI[1], b->dimSizeRDI[0] };
-    int cDimSize[2] = { c->dimSizeRDI[1], c->dimSizeRDI[0] };
+    int aDimSize[2] = { -a->dimSizeRDI[1], a->dimSizeRDI[0] };
+    int bDimSize[2] = { -b->dimSizeRDI[1], b->dimSizeRDI[0] };
+    int cDimSize[2] = { -c->dimSizeRDI[1], c->dimSizeRDI[0] };
     bool isSparseMul = false;
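The only change in this hunk is the minus sign on the leading dimension of each 2-D slice descriptor. The constructor that consumes these arrays is not shown, so the meaning of the negative value is an assumption here; a common convention is to encode a boolean flag (for example, "take storage from a reusable buffer instead of a fresh allocation") in the sign of the size and recover the real extent with abs(). A generic illustration of that convention, with all names hypothetical and none of them NiuTrans.Tensor APIs:

```cpp
#include <cstdio>
#include <cstdlib>

// Hypothetical decoding of a "negative size carries a flag" convention.
struct DimSpec {
    int  size;      // true extent of the dimension
    bool flagged;   // whatever the sign was used to signal (e.g. "use shared buffer")
};

static DimSpec DecodeDim(int encoded)
{
    DimSpec d;
    d.size    = std::abs(encoded);
    d.flagged = encoded < 0;
    return d;
}

int main()
{
    // Mirrors the shape arrays above: only the leading dimension carries the flag.
    int aDimSize[2] = { -512, 1024 };
    for (int i = 0; i < 2; ++i) {
        DimSpec d = DecodeDim(aDimSize[i]);
        std::printf("dim %d: size = %d, flagged = %d\n", i, d.size, (int)d.flagged);
    }
    return 0;
}
```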
@@ -293,7 +293,7 @@ void CudaSoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
     /* ytmp = dE/dy_j - \beta */
     Unsqueeze(beta, ytmp, leadDim, y->dimSize[leadDim]);
-    Sum(dedy, ytmp, ytmp, -1.0F);
+    _Sum(dedy, ytmp, ytmp, -1.0F);
     /* dE/ds_j = y_j * ytmp = y_j * (dE/dy_j - \beta) */
     Multiply(y, ytmp, dedx, 0);
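The comments in this hunk state the standard softmax backward formula that the Unsqueeze / _Sum / Multiply sequence implements: broadcast β along the leading dimension, subtract it from dE/dy, then multiply by y elementwise (β itself is computed before this hunk; only its use is shown here). For reference, the derivation behind those comments:

```latex
% Softmax forward: y_j = e^{s_j} / \sum_k e^{s_k}, hence dy_k/ds_j = y_k (\delta_{kj} - y_j).
\[
\frac{\partial E}{\partial s_j}
  = \sum_k \frac{\partial E}{\partial y_k}\,\frac{\partial y_k}{\partial s_j}
  = \sum_k \frac{\partial E}{\partial y_k}\, y_k\left(\delta_{kj} - y_j\right)
  = y_j\left(\frac{\partial E}{\partial y_j} - \beta\right),
\qquad
\beta = \sum_k y_k\,\frac{\partial E}{\partial y_k}.
\]
```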
@@ -73,7 +73,7 @@ bool TestSum1()
     bGPU->SetData(bData, unitNum);
     /* call sum function */
-    Sum(aGPU, bGPU);
+    _Sum(aGPU, bGPU);
     /* check results */
     gpuTest = aGPU->CheckData(answer, unitNum);
@@ -151,7 +151,7 @@ bool TestSum2()
     cGPU->SetZeroAll();
     /* call Sum function */
-    Sum(aGPU, bGPU, cGPU, beta);
+    _Sum(aGPU, bGPU, cGPU, beta);
     /* check results */
     gpuTest = cGPU->CheckData(answer, unitNum);