Commit 003def3d by xuchen

1. Update test cases.
2. Bug fixed in the MatrixMulBatched function.
3. Bug fixed in the SumByColumnVT function.

parent f12ced64
@@ -89,9 +89,9 @@ void MatrixMulBatched(XTensor * a, MATRIX_TRANS_TYPE transposedA,
void * ap = (char*)a->data + aRealBlockSize * p;
void * bp = (char*)b->data + bRealBlockSize * p;
void * cp = (char*)c->data + cRealBlockSize * p;
- XTensor * ai = new XTensor(2, aDimSize, a->dataType, a->denseRatio, a->mem);
- XTensor * bi = new XTensor(2, bDimSize, b->dataType, b->denseRatio, b->mem);
- XTensor * ci = new XTensor(2, cDimSize, c->dataType, c->denseRatio, c->mem);
+ XTensor * ai = NewTensor(2, aDimSize, a->dataType, a->denseRatio, a->devID, a->mem);
+ XTensor * bi = NewTensor(2, bDimSize, b->dataType, b->denseRatio, a->devID, b->mem);
+ XTensor * ci = NewTensor(2, cDimSize, c->dataType, c->denseRatio, a->devID, c->mem);
ai->data = ap;
bi->data = bp;
ci->data = cp;
......
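Note on the MatrixMulBatched hunk above: the per-block wrapper tensors are now created with NewTensor so that the parent tensor's device id travels with the 2-D view, rather than being constructed directly without one. A minimal sketch of the slice-view pattern, reusing the names from the hunk and omitting the rest of the loop body:

    /* hedged sketch: build a 2-D view over block p of a, inheriting a's
       device id and memory pool, then point the view at the block's data */
    XTensor * ai = NewTensor(2, aDimSize, a->dataType, a->denseRatio, a->devID, a->mem);
    ai->data = ap;   /* alias into a; nothing is copied */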
@@ -52,7 +52,7 @@ void KernelADDByColumnVT(DTYPE * a, DTYPE * b, DTYPE * c, int colNum, int rowNum
DTYPE * bp = b + (rowNum * k + row) * colNum;
if (colNum % 4 == 0) {
for (int i = 0; i < colNum; i += 4)
- sum += bp[i] + bp[i + 1] + b[i + 2] + b[i + 3];
+ sum += bp[i] + bp[i + 1] + bp[i + 2] + bp[i + 3];
}
else if (colNum % 2 == 0) {
for (int i = 0; i < colNum; i += 2)
......
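The KernelADDByColumnVT fix above corrects an indexing typo in the 4-way unrolled accumulation: two of the four terms were read through the raw base pointer b instead of the row pointer bp, so the kernel summed values from the wrong row. A minimal sketch of the corrected unrolled loop, assuming colNum and bp are set up as in the kernel:

    /* hedged sketch of the corrected 4-way unrolled column sum;
       every term must be read through bp, the pointer to the current row */
    DTYPE sum = 0;
    if (colNum % 4 == 0) {
        for (int i = 0; i < colNum; i += 4)
            sum += bp[i] + bp[i + 1] + bp[i + 2] + bp[i + 3];
    }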
@@ -483,9 +483,9 @@ bool TestConcatenate4()
delete sGPU1;
delete sGPU2;
delete tGPU;
- delete[] sDimSize1;
- delete[] sDimSize2;
- delete[] tDimSize;
+ //delete[] sDimSize1;
+ //delete[] sDimSize2;
+ //delete[] tDimSize;
return cpuTest && gpuTest;
#else
......
@@ -20,11 +20,12 @@
*/
#include "TCopyIndexed.h"
+ #include "../xc/Mycode.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/*
- case 1 copy indexed sub-tensors
+ case 1: copy indexed sub-tensors
In this case, (3, 2, 3) -> (3, 2, 2), dim = 2, indexSize = 2,
srcIndex = [0, 2], tgtIndex = [0, 1], copyNum = 1.
*/
@@ -127,6 +128,213 @@ bool TestCopyIndexed1()
#endif // USE_CUDA
}
/*
case 2: copy indexed sub-tensors
In this case, (3, 2, 3) -> (3, 2, 2), dim = 2, indexSize = 2,
srcIndex = [0, 2], tgtIndex = [1, 0], copyNum = 1.
*/
bool TestCopyIndexed2()
{
/* a input tensor of size (3, 2, 3) */
int sOrder = 3;
int * sDimSize = new int[sOrder];
sDimSize[0] = 3;
sDimSize[1] = 2;
sDimSize[2] = 3;
int sUnitNum = 1;
for (int i = 0; i < sOrder; i++)
sUnitNum *= sDimSize[i];
/* a output tensor of size (3, 2, 2) */
int tOrder = 3;
int * tDimSize = new int[tOrder];
tDimSize[0] = 3;
tDimSize[1] = 2;
tDimSize[2] = 2;
int tUnitNum = 1;
for (int i = 0; i < tOrder; i++)
tUnitNum *= tDimSize[i];
DTYPE sData[3][2][3] = { { {0.0F, -1.0F, 2.0F},
{2.0F, 1.0F, 3.0F} },
{ {1.0F, 2.0F, 4.0F},
{3.0F, 1.0F, 2.0F}},
{ {-1.0F, 3.0F, 2.0F},
{1.0F, -1.0F, 0.0F} } };
DTYPE answer[3][2][2] = { { {2.0F, 0.0F},
{3.0F, 2.0F} },
{ {4.0F, 1.0F},
{2.0F, 3.0F}},
{ {2.0F, -1.0F},
{0.0F, 1.0F} } };
int dim = 2;
int indexSize = 2;
int srcIndex[2] = {0, 2};
int tgtIndex[2] = {1, 0};
int copyNum = 1;
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * s = NewTensor(sOrder, sDimSize);
XTensor * t = NewTensor(tOrder, tDimSize);
/* initialize variables */
s->SetData(sData, sUnitNum);
t->SetZeroAll();
/* call CopyIndexed function */
CopyIndexed(s, t, dim, srcIndex, indexSize, tgtIndex, copyNum);
/* check results */
cpuTest = t->CheckData(answer, tUnitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * tGPU = NewTensor(sOrder, tDimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
sGPU->SetData(sData, sUnitNum);
tGPU->SetZeroAll();
/* call CopyIndexed function */
CopyIndexed(sGPU, tGPU, dim, srcIndex, indexSize, tgtIndex, copyNum);
/* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum);
/* destroy variables */
delete s;
delete t;
delete sGPU;
delete tGPU;
delete[] sDimSize;
delete[] tDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete s;
delete t;
delete[] sDimSize;
delete[] tDimSize;
return cpuTest;
#endif // USE_CUDA
}
/*
case 3: copy indexed sub-tensors
In this case, (3, 2, 3) -> (3, 2, 2), dim = 2, indexSize = 1,
srcIndex = [0], tgtIndex = [0], copyNum = 2.
*/
bool TestCopyIndexed3()
{
/* a input tensor of size (3, 2, 3) */
int sOrder = 3;
int * sDimSize = new int[sOrder];
sDimSize[0] = 3;
sDimSize[1] = 2;
sDimSize[2] = 3;
int sUnitNum = 1;
for (int i = 0; i < sOrder; i++)
sUnitNum *= sDimSize[i];
/* a output tensor of size (3, 2, 2) */
int tOrder = 3;
int * tDimSize = new int[tOrder];
tDimSize[0] = 3;
tDimSize[1] = 2;
tDimSize[2] = 2;
int tUnitNum = 1;
for (int i = 0; i < tOrder; i++)
tUnitNum *= tDimSize[i];
DTYPE sData[3][2][3] = { { {0.0F, -1.0F, 2.0F},
{2.0F, 1.0F, 3.0F} },
{ {1.0F, 2.0F, 4.0F},
{3.0F, 1.0F, 2.0F}},
{ {-1.0F, 3.0F, 2.0F},
{1.0F, -1.0F, 0.0F} } };
DTYPE answer[3][2][2] = { { {0.0F, -1.0F},
{2.0F, 1.0F} },
{ {1.0F, 2.0F},
{3.0F, 1.0F}},
{ {-1.0F, 3.0F},
{1.0F, -1.0F} } };
int dim = 2;
int indexSize = 1;
int srcIndex[1] = {0};
int tgtIndex[1] = {0};
int copyNum = 2;
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * s = NewTensor(sOrder, sDimSize);
XTensor * t = NewTensor(tOrder, tDimSize);
/* initialize variables */
s->SetData(sData, sUnitNum);
t->SetZeroAll();
/* call CopyIndexed function */
CopyIndexed(s, t, dim, srcIndex, indexSize, tgtIndex, copyNum);
/* check results */
cpuTest = t->CheckData(answer, tUnitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * tGPU = NewTensor(sOrder, tDimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
sGPU->SetData(sData, sUnitNum);
tGPU->SetZeroAll();
/* call CopyIndexed function */
CopyIndexed(sGPU, tGPU, dim, srcIndex, indexSize, tgtIndex, copyNum);
/* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum);
/* destroy variables */
delete s;
delete t;
delete sGPU;
delete tGPU;
delete[] sDimSize;
delete[] tDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete s;
delete t;
delete[] sDimSize;
delete[] tDimSize;
return cpuTest;
#endif // USE_CUDA
}
/* other cases */
/*
TODO!!
@@ -147,6 +355,24 @@ bool TestCopyIndexed()
else
XPRINT(0, stdout, ">> case 1 passed!\n");
/* case 2 test */
caseFlag = TestCopyIndexed2();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 2 failed!\n");
}
else
XPRINT(0, stdout, ">> case 2 passed!\n");
/* case 3 test */
caseFlag = TestCopyIndexed3();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 3 failed!\n");
}
else
XPRINT(0, stdout, ">> case 3 passed!\n");
/* other cases test */
/*
TODO!!
......
@@ -30,15 +30,15 @@ Identity function: y = x
*/
bool TestIdentity1()
{
- /* a input tensor of size (2, 3) */
- int sOrder = 2;
- int * sDimSize = new int[sOrder];
- sDimSize[0] = 2;
- sDimSize[1] = 3;
- int sUnitNum = 1;
- for (int i = 0; i < sOrder; i++)
- sUnitNum *= sDimSize[i];
+ /* a tensor of size (2, 3) */
+ int order = 2;
+ int * dimSize = new int[order];
+ dimSize[0] = 2;
+ dimSize[1] = 3;
+ int unitNum = 1;
+ for (int i = 0; i < order; i++)
+ unitNum *= dimSize[i];
DTYPE xData[2][3] = { {0.0F, 1.0F, 2.0F},
{0.5F, 0.7F, 1.4F} };
@@ -49,47 +49,50 @@ bool TestIdentity1()
bool cpuTest = true;
/* create tensors */
- XTensor * x = NewTensor(sOrder, sDimSize);
- XTensor * y = NewTensor(sOrder, sDimSize);
+ XTensor * x = NewTensor(order, dimSize);
+ XTensor * y = NewTensor(order, dimSize);
/* initialize variables */
- x->SetData(xData, sUnitNum);
+ x->SetData(xData, unitNum);
y->SetZeroAll();
/* call Identity function */
Identity(x, y);
/* check result */
- cpuTest = y->CheckData(answer, sUnitNum);
+ cpuTest = y->CheckData(answer, unitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
- XTensor * xGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
- XTensor * yGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
- xGPU->SetData(xData, sUnitNum);
+ xGPU->SetData(xData, unitNum);
yGPU->SetZeroAll();
/* call Identity function */
Identity(xGPU, yGPU);
/* check result */
- gpuTest = yGPU->CheckData(answer, sUnitNum);
+ gpuTest = yGPU->CheckData(answer, unitNum);
/* destroy variables */
- delete x, y;
- delete xGPU, yGPU;
- delete[] sDimSize;
+ delete x;
+ delete y;
+ delete xGPU;
+ delete yGPU;
+ delete[] dimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
- delete x, y;
- delete[] sDimSize;
+ delete x;
+ delete y;
+ delete[] dimSize;
return cpuTest;
#endif // USE_CUDA
@@ -98,35 +101,39 @@ bool TestIdentity1()
/*
case 2: test IdentityBackward function.
IdentityBackward function: dE/dx = dE/dy * dy/dx = dE/dy
+ In this case, lossName=CROSSENTROPY.
*/
bool TestIdentity2()
{
- int sOrder = 2;
- int * sDimSize = new int[sOrder];
- sDimSize[0] = 1;
- sDimSize[1] = 3;
- int sUnitNum = 1;
- for (int i = 0; i < sOrder; i++)
- sUnitNum *= sDimSize[i];
- DTYPE xData[1][3] = { {1.0F, 1.0F, 2.0F} };
- DTYPE gData[1][3] = { {0.0F, 0.0F, 1.0F} };
+ /* a tensor of size (2, 3) */
+ int order = 2;
+ int * dimSize = new int[order];
+ dimSize[0] = 1;
+ dimSize[1] = 3;
+ int unitNum = 1;
+ for (int i = 0; i < order; i++)
+ unitNum *= dimSize[i];
+ DTYPE xData[3] = {1.0F, 1.0F, 2.0F};
+ DTYPE gData[3] = {0.0F, 0.0F, 1.0F};
+ DTYPE yAnswer[3] = {1.0F, 1.0F, 2.0F};
+ DTYPE dedyAnswer[3] = {0.0F, 0.0F, -0.5F};
DTYPE dedxAnswer[3] = {0.0F, 0.0F, -0.5F};
/* CPU test */
bool cpuTest = true;
/* create tensors */
- XTensor * x = NewTensor(sOrder, sDimSize);
- XTensor * y = NewTensor(sOrder, sDimSize);
- XTensor * g = NewTensor(sOrder, sDimSize);
- XTensor * dedy = NewTensor(sOrder, sDimSize);
- XTensor * dedx = NewTensor(sOrder, sDimSize);
+ XTensor * x = NewTensor(order, dimSize);
+ XTensor * y = NewTensor(order, dimSize);
+ XTensor * g = NewTensor(order, dimSize);
+ XTensor * dedy = NewTensor(order, dimSize);
+ XTensor * dedx = NewTensor(order, dimSize);
/* initialize variables */
- x->SetData(xData, sUnitNum);
- g->SetData(gData, sUnitNum);
+ x->SetData(xData, unitNum);
+ g->SetData(gData, unitNum);
y->SetZeroAll();
dedx->SetZeroAll();
dedy->SetZeroAll();
@@ -138,22 +145,24 @@ bool TestIdentity2()
IdentityBackward(g, y, x, dedy, dedx, CROSSENTROPY);
/* check result */
- cpuTest = dedx->CheckData(dedxAnswer, sUnitNum, 1e-4F);
+ cpuTest = y->CheckData(yAnswer, unitNum, 1e-4F)
+ && dedx->CheckData(dedxAnswer, unitNum, 1e-4F)
+ && dedy->CheckData(dedyAnswer, unitNum, 1e-4F);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
- XTensor * xGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
- XTensor * yGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
- XTensor * gGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
- XTensor * dedyGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
- XTensor * dedxGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * gGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * dedyGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * dedxGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
- xGPU->SetData(xData, sUnitNum);
- gGPU->SetData(gData, sUnitNum);
+ xGPU->SetData(xData, unitNum);
+ gGPU->SetData(gData, unitNum);
yGPU->SetZeroAll();
dedxGPU->SetZeroAll();
dedyGPU->SetZeroAll();
@@ -165,7 +174,9 @@ bool TestIdentity2()
IdentityBackward(gGPU, yGPU, xGPU, dedyGPU, dedxGPU, CROSSENTROPY);
/* check result */
- gpuTest = dedxGPU->CheckData(dedxAnswer, sUnitNum, 1e-4F);
+ gpuTest = yGPU->CheckData(yAnswer, unitNum, 1e-4F)
+ && dedxGPU->CheckData(dedxAnswer, unitNum, 1e-4F)
+ && dedyGPU->CheckData(dedyAnswer, unitNum, 1e-4F);
/* destroy variables */
delete x;
@@ -178,7 +189,7 @@ bool TestIdentity2()
delete gGPU;
delete dedxGPU;
delete dedyGPU;
- delete[] sDimSize;
+ delete[] dimSize;
return cpuTest && gpuTest;
#else
@@ -188,7 +199,7 @@ bool TestIdentity2()
delete g;
delete dedx;
delete dedy;
- delete[] sDimSize;
+ delete[] dimSize;
return cpuTest;
#endif // USE_CUDA
......
@@ -20,16 +20,15 @@
*/
#include "../core/math/ScaleAndShift.h"
- #include "../function/Loss.h"
#include "TLoss.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/*
- case 1: test LossCompute function
+ case 1: test LossCompute function.
In this case, Loss function name = SQUAREDERROR.
loss = sum_{i} 0.5*(t_i - y_i)^2,
- where t_i is the gold standard and y_i is the model output
+ where t_i is the gold standard and y_i is the model output.
*/
bool TestLoss1()
{
@@ -103,10 +102,10 @@ bool TestLoss1()
}
/*
- case 2: test LossCompute function
+ case 2: test LossCompute function.
In this case, Loss function name = CROSSENTROPY.
loss = sum_{i} (-t_i * log(y_i))
- where t_i is the gold standard and y_i is the model output
+ where t_i is the gold standard and y_i is the model output.
*/
bool TestLoss2()
{
@@ -180,10 +179,10 @@ bool TestLoss2()
}
/*
- case 3: test LossCompute function
+ case 3: test LossCompute function.
In this case, Loss function name = ONEHOTERROR.
loss = sum_{i} e_i
- where e_i = 0.5*(t_i - y_i)^2 if t_i = 1, e_i = 0 otherwise
+ where e_i = 0.5*(t_i - y_i)^2 if t_i = 1, e_i = 0 otherwise.
*/
bool TestLoss3()
{
......
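For reference, the three loss definitions documented in the TLoss.cpp comments above can be checked by hand. A small stand-alone sketch (plain C++, illustrative only, with the gold standard t and model output y chosen arbitrarily):

    #include <cmath>
    #include <cstdio>

    /* hedged sketch: evaluate SQUAREDERROR, CROSSENTROPY and ONEHOTERROR
       as described in the comments above for a toy output/gold pair */
    int main()
    {
        float y[3] = {0.5F, 0.3F, 0.2F};
        float t[3] = {0.0F, 0.0F, 1.0F};
        float se = 0.0F, ce = 0.0F, oh = 0.0F;
        for (int i = 0; i < 3; i++) {
            se += 0.5F * (t[i] - y[i]) * (t[i] - y[i]);      /* sum_{i} 0.5*(t_i - y_i)^2 */
            ce += -t[i] * std::log(y[i]);                    /* sum_{i} (-t_i * log(y_i)) */
            if (t[i] == 1.0F)                                /* e_i = 0.5*(t_i - y_i)^2 if t_i = 1 */
                oh += 0.5F * (t[i] - y[i]) * (t[i] - y[i]);
        }
        std::printf("%f %f %f\n", se, ce, oh);
        return 0;
    }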
@@ -19,6 +19,7 @@
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-15
*/
+ #include "../XTensor.h"
#include "TMatrixMulBatched.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
......
@@ -29,25 +29,15 @@ In this case, y = max(0, x)
*/
bool TestRectify1()
{
- /* a x tensor of size (2, 3) */
- int xOrder = 2;
- int * xDimSize = new int[xOrder];
- xDimSize[0] = 2;
- xDimSize[1] = 3;
- int xUnitNum = 1;
- for (int i = 0; i < xOrder; i++)
- xUnitNum *= xDimSize[i];
- /* a y tensor of size (2, 3) */
- int yOrder = 2;
- int * yDimSize = new int[yOrder];
- yDimSize[0] = 2;
- yDimSize[1] = 3;
- int yUnitNum = 1;
- for (int i = 0; i < yOrder; i++)
- yUnitNum *= yDimSize[i];
+ /* a tensor of size (2, 3) */
+ int order = 2;
+ int * dimSize = new int[order];
+ dimSize[0] = 2;
+ dimSize[1] = 3;
+ int unitNum = 1;
+ for (int i = 0; i < order; i++)
+ unitNum *= dimSize[i];
DTYPE xData[2][3] = { {0.0F, -1.0F, 2.0F},
{3.0F, -4.0F, -5.0F} };
@@ -58,52 +48,50 @@ bool TestRectify1()
bool cpuTest = true;
/* create tensors */
- XTensor * x = NewTensor(xOrder, xDimSize);
- XTensor * y = NewTensor(yOrder, yDimSize);
+ XTensor * x = NewTensor(order, dimSize);
+ XTensor * y = NewTensor(order, dimSize);
/* initialize variables */
- x->SetData(xData, xUnitNum);
+ x->SetData(xData, unitNum);
y->SetZeroAll();
/* call Rectify function */
Rectify(x, y);
/* check results */
- cpuTest = y->CheckData(answer, yUnitNum);
+ cpuTest = y->CheckData(answer, unitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensor */
- XTensor * xGPU = NewTensor(xOrder, xDimSize, X_FLOAT, 1.0F, 0);
- XTensor * yGPU = NewTensor(yOrder, yDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
/* Initialize variables */
- xGPU->SetData(xData, xUnitNum);
+ xGPU->SetData(xData, unitNum);
yGPU->SetZeroAll();
/* call Rectify function */
Rectify(xGPU, yGPU);
/* check results */
- gpuTest = yGPU->CheckData(answer, yUnitNum);
+ gpuTest = yGPU->CheckData(answer, unitNum);
/* destroy variables */
delete x;
delete y;
delete xGPU;
delete yGPU;
- delete[] xDimSize;
- delete[] yDimSize;
+ delete[] dimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete x;
delete y;
- delete[] xDimSize;
- delete[] yDimSize;
+ delete[] dimSize;
return cpuTest;
#endif // USE_CUDA
@@ -117,73 +105,83 @@ In this case, lossName=CROSSENTROPY.
*/
bool TestRectify2()
{
- /* a x tensor of size (2, 3) */
- int xOrder = 2;
- int * xDimSize = new int[xOrder];
- xDimSize[0] = 2;
- xDimSize[1] = 3;
- int xUnitNum = 1;
- for (int i = 0; i < xOrder; i++)
- xUnitNum *= xDimSize[i];
+ /* a tensor of size (2, 3) */
+ int order = 2;
+ int * dimSize = new int[order];
+ dimSize[0] = 2;
+ dimSize[1] = 3;
+ int unitNum = 1;
+ for (int i = 0; i < order; i++)
+ unitNum *= dimSize[i];
DTYPE xData[2][3] = { {1.0F, 1.0F, 2.0F},
{2.0F, 4.0F, 5.0F} };
- DTYPE yData[2][3] = { {1.0F, 1.0F, 2.0F},
- {2.0F, 4.0F, 5.0F} };
DTYPE goldData[2][3] = { {1.0F, 1.0F, 1.0F},
{1.0F, 1.0F, 1.0F} };
- DTYPE dedyData[2][3] = { {-1.0F, -1.0F, -0.5F},
- {-0.5F, -0.25F, -0.2F} };
- DTYPE answer[2][3] = { {-1.0F, -1.0F, -0.5F},
- {-0.5F, -0.25F, -0.2F} };
+ DTYPE yAnswer[2][3] = { {1.0F, 1.0F, 2.0F},
+ {2.0F, 4.0F, 5.0F} };
+ DTYPE dedyAnswer[2][3] = { {-1.0F, -1.0F, -0.5F},
+ {-0.5F, -0.25F, -0.2F} };
+ DTYPE dedxAnswer[2][3] = { {-1.0F, -1.0F, -0.5F},
+ {-0.5F, -0.25F, -0.2F} };
/* CPU test */
bool cpuTest = true;
/* create tensors */
- XTensor * x = NewTensor(xOrder, xDimSize);
- XTensor * y = NewTensor(xOrder, xDimSize);
- XTensor * gold = NewTensor(xOrder, xDimSize);
- XTensor * dedy = NewTensor(xOrder, xDimSize);
- XTensor * dedx = NewTensor(xOrder, xDimSize);
+ XTensor * x = NewTensor(order, dimSize);
+ XTensor * y = NewTensor(order, dimSize);
+ XTensor * gold = NewTensor(order, dimSize);
+ XTensor * dedy = NewTensor(order, dimSize);
+ XTensor * dedx = NewTensor(order, dimSize);
/* initialize variables */
- x->SetData(xData, xUnitNum);
- y->SetData(yData, xUnitNum);
- gold->SetData(goldData, xUnitNum);
- dedy->SetData(dedyData, xUnitNum);
+ x->SetData(xData, unitNum);
+ gold->SetData(goldData, unitNum);
+ y->SetZeroAll();
+ dedy->SetZeroAll();
dedx->SetZeroAll();
+ /* call Rectify function */
+ Rectify(x, y);
/* call RectifyBackward function */
- RectifyBackward(gold, y, x, dedy, dedx, NOLOSS);
+ RectifyBackward(gold, y, x, dedy, dedx, CROSSENTROPY);
/* check results */
- cpuTest = dedx->CheckData(answer, xUnitNum);
+ cpuTest = y->CheckData(yAnswer, unitNum, 1e-4F)
+ && dedx->CheckData(dedxAnswer, unitNum, 1e-4F)
+ && dedy->CheckData(dedyAnswer, unitNum, 1e-4F);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
- XTensor * xGPU = NewTensor(xOrder, xDimSize, X_FLOAT, 1.0F, 0);
- XTensor * yGPU = NewTensor(xOrder, xDimSize, X_FLOAT, 1.0F, 0);
- XTensor * goldGPU = NewTensor(xOrder, xDimSize, X_FLOAT, 1.0F, 0);
- XTensor * dedyGPU = NewTensor(xOrder, xDimSize, X_FLOAT, 1.0F, 0);
- XTensor * dedxGPU = NewTensor(xOrder, xDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * goldGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * dedyGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * dedxGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
- xGPU->SetData(xData, xUnitNum);
- yGPU->SetData(yData, xUnitNum);
- goldGPU->SetData(goldData, xUnitNum);
- dedyGPU->SetData(dedyData, xUnitNum);
+ xGPU->SetData(xData, unitNum);
+ goldGPU->SetData(goldData, unitNum);
+ yGPU->SetZeroAll();
+ dedyGPU->SetZeroAll();
dedxGPU->SetZeroAll();
+ /* call Rectify function */
+ Rectify(xGPU, yGPU);
/* call rectifybackward function */
- RectifyBackward(goldGPU, yGPU, xGPU, dedyGPU, dedxGPU, NOLOSS);
+ RectifyBackward(goldGPU, yGPU, xGPU, dedyGPU, dedxGPU, CROSSENTROPY);
/* check results */
- gpuTest = dedxGPU->CheckData(answer, xUnitNum);
+ gpuTest = yGPU->CheckData(yAnswer, unitNum, 1e-4F)
+ && dedxGPU->CheckData(dedxAnswer, unitNum, 1e-4F)
+ && dedyGPU->CheckData(dedyAnswer, unitNum, 1e-4F);
/* destroy variables */
delete x;
@@ -196,7 +194,7 @@ bool TestRectify2()
delete dedyGPU;
delete dedxGPU;
delete goldGPU;
- delete[] xDimSize;
+ delete[] dimSize;
return cpuTest && gpuTest;
#else
@@ -206,7 +204,7 @@ bool TestRectify2()
delete dedy;
delete dedx;
delete gold;
- delete[] xDimSize;
+ delete[] dimSize;
return cpuTest;
#endif // USE_CUDA
@@ -220,7 +218,7 @@ TODO!!
/* test for Rectify Function */
bool TestRectify()
{
- XPRINT(0, stdout, "[TEST RECTIFY] test rectify and its backward computation \n");
+ XPRINT(0, stdout, "[TEST RECTIFY] rectify function and its backward computation \n");
bool returnFlag = true, caseFlag = true;
/* case 1 test */
......
@@ -23,8 +23,7 @@
namespace nts { // namespace nts(NiuTrans.Tensor)
- /* case 1: set the cell to the ascending order along a given dimension.
- */
+ /* case 1: set the cell to the ascending order along a given dimension. */
bool TestSetAscendingOrder1()
{
/* a input tensor of size (2, 4) */
@@ -50,7 +49,6 @@ bool TestSetAscendingOrder1()
s->SetZeroAll();
/* call SetAscendingOrder function */
s->SetAscendingOrder(1);
/* check results */
......
@@ -23,7 +23,10 @@
namespace nts { // namespace nts(NiuTrans.Tensor)
- /* case 1: set the cell to the ascending order along a given dimension. */
+ /*
+ case 1: test SetDataRand function.
+ set the tensor items by a uniform distribution in range [lower, upper].
+ */
bool TestSetData1()
{
/* a input tensor of size (2, 4) */
@@ -44,7 +47,7 @@ bool TestSetData1()
/* create tensors */
XTensor * s = NewTensor(sOrder, sDimSize);
- /* call SetData function */
+ /* call SetDataRand function */
s->SetDataRand(0.0, 1.0);
/* check results */
......
@@ -25,102 +25,71 @@
namespace nts { // namespace nts(NiuTrans.Tensor)
/*
- case 1: test Sigmoid function and SigmoidBackward function.
+ case 1: test Sigmoid function.
sigmoid function: y = 1/(1+exp(-x))
- backward computation: dE/ds = dE/dy * dy/dx
*/
bool TestSigmoid1()
{
/* a input tensor of size (3) */
- int sOrder = 1;
- int * sDimSize = new int[sOrder];
- sDimSize[0] = 3;
- int sUnitNum = 1;
- for (int i = 0; i < sOrder; i++)
- sUnitNum *= sDimSize[i];
+ int order = 1;
+ int * dimSize = new int[order];
+ dimSize[0] = 3;
+ int unitNum = 1;
+ for (int i = 0; i < order; i++)
+ unitNum *= dimSize[i];
DTYPE xData[3] = {0.0F, 1.0F, 2.0F};
- DTYPE gData[3] = {0.4F, 0.8F, 1.0F};
- DTYPE dedyData[3] = {-0.8F, -1.094F, -1.135F};
- DTYPE yAnswer[3] = {0.5F, 0.731F, 0.881F};
- DTYPE dedxAnswer[3] = {-0.2F, -0.215F, -0.119F};
+ DTYPE answer[3] = {0.5F, 0.7311F, 0.8808F};
/* CPU test */
bool cpuTest = true;
/* create tensors */
- XTensor * x = NewTensor(sOrder, sDimSize);
- XTensor * y = NewTensor(sOrder, sDimSize);
- XTensor * g = NewTensor(sOrder, sDimSize);
- XTensor * dedy = NewTensor(sOrder, sDimSize);
- XTensor * dedx = NewTensor(sOrder, sDimSize);
+ XTensor * x = NewTensor(order, dimSize);
+ XTensor * y = NewTensor(order, dimSize);
/* initialize variables */
- x->SetData(xData, sUnitNum);
- g->SetData(gData, sUnitNum);
- dedy->SetData(dedyData, sUnitNum);
+ x->SetData(xData, unitNum);
y->SetZeroAll();
- dedx->SetZeroAll();
/* call Sigmoid function */
Sigmoid(x, y);
- /* call SigmoidBackward function */
- SigmoidBackward(g, y, x, dedy, dedx, NOLOSS);
/* check result */
- cpuTest = y->CheckData(yAnswer, sUnitNum, 0.001F) && dedx->CheckData(dedxAnswer, sUnitNum, 0.001F);
+ cpuTest = y->CheckData(answer, unitNum, 1e-4F);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
- XTensor * xGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
- XTensor * yGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
- XTensor * gGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
- XTensor * dedyGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
- XTensor * dedxGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
- xGPU->SetData(xData, sUnitNum);
- gGPU->SetData(gData, sUnitNum);
- dedyGPU->SetData(dedyData, sUnitNum);
+ xGPU->SetData(xData, unitNum);
yGPU->SetZeroAll();
- dedxGPU->SetZeroAll();
/* call Sigmoid function */
Sigmoid(xGPU, yGPU);
- /* call SigmoidBackward function */
- SigmoidBackward(gGPU, yGPU, xGPU, dedyGPU, dedxGPU, NOLOSS);
/* check result */
- gpuTest = yGPU->CheckData(yAnswer, sUnitNum, 0.001F) && dedxGPU->CheckData(dedxAnswer, sUnitNum, 0.001F);
+ gpuTest = yGPU->CheckData(answer, unitNum, 1e-4F);
/* destroy variables */
delete x;
delete y;
- delete g;
- delete dedx;
- delete dedy;
delete xGPU;
delete yGPU;
- delete gGPU;
- delete dedxGPU;
- delete dedyGPU;
- delete[] sDimSize;
+ delete[] dimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete x;
delete y;
- delete g;
- delete dedx;
- delete dedy;
- delete[] sDimSize;
+ delete[] dimSize;
return cpuTest;
#endif // USE_CUDA
@@ -129,70 +98,72 @@ bool TestSigmoid1()
/*
case 2: test Sigmoid function and SigmoidBackward function.
sigmoid function: y = 1/(1+exp(-x))
- backward computation: dE/ds = dE/dy * dy/dx
+ backward computation:
+ dE/ds = dE/dy * dy/dx
+ dy/dx = y * (1 -y)
+ In this case, LossName=CROSSENTROPY.
*/
bool TestSigmoid2()
{
/* a input tensor of size (3) */
- int sOrder = 1;
- int * sDimSize = new int[sOrder];
- sDimSize[0] = 3;
- int sUnitNum = 1;
- for (int i = 0; i < sOrder; i++)
- sUnitNum *= sDimSize[i];
+ int order = 1;
+ int * dimSize = new int[order];
+ dimSize[0] = 3;
+ int unitNum = 1;
+ for (int i = 0; i < order; i++)
+ unitNum *= dimSize[i];
DTYPE xData[3] = {0.0F, 1.0F, 2.0F};
DTYPE gData[3] = {0.4F, 0.8F, 1.0F};
- DTYPE dedyData[3] = {-0.8F, -1.094F, -1.135F};
- DTYPE yAnswer[3] = {0.5F, 0.731F, 0.881F};
- DTYPE dedxAnswer[3] = {-0.2F, -0.215F, -0.119F};
+ DTYPE yAnswer[3] = {0.5F, 0.7311F, 0.8808F};
+ DTYPE dedyAnswer[3] = {-0.8F, -1.0943F, -1.1353F};
+ DTYPE dedxAnswer[3] = {-0.2F, -0.2151F, -0.1192F};
/* CPU test */
bool cpuTest = true;
/* create tensors */
- XTensor * x = NewTensor(sOrder, sDimSize);
- XTensor * y = NewTensor(sOrder, sDimSize);
- XTensor * g = NewTensor(sOrder, sDimSize);
- XTensor * dedy = NewTensor(sOrder, sDimSize);
- XTensor * dedx = NewTensor(sOrder, sDimSize);
+ XTensor * x = NewTensor(order, dimSize);
+ XTensor * y = NewTensor(order, dimSize);
+ XTensor * g = NewTensor(order, dimSize);
+ XTensor * dedy = NewTensor(order, dimSize);
+ XTensor * dedx = NewTensor(order, dimSize);
/* initialize variables */
- x->SetData(xData, sUnitNum);
- g->SetData(gData, sUnitNum);
- dedy->SetZeroAll();
+ x->SetData(xData, unitNum);
+ g->SetData(gData, unitNum);
y->SetZeroAll();
+ dedy->SetZeroAll();
dedx->SetZeroAll();
/* call Sigmoid function */
Sigmoid(x, y);
- /* initialize variables */
- dedy->SetData(dedyData, sUnitNum);
/* call SigmoidBackward function */
SigmoidBackward(g, y, x, dedy, dedx, CROSSENTROPY);
/* check result */
- cpuTest = y->CheckData(yAnswer, sUnitNum) && dedx->CheckData(dedxAnswer, sUnitNum);
+ cpuTest = y->CheckData(yAnswer, unitNum, 1e-4F)
+ && dedx->CheckData(dedxAnswer, unitNum, 1e-4F)
+ && dedy->CheckData(dedyAnswer, unitNum, 1e-4F);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
- XTensor * xGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
- XTensor * yGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
- XTensor * gGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
- XTensor * dedyGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
- XTensor * dedxGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * gGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * dedyGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * dedxGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
- xGPU->SetData(xData, sUnitNum);
- gGPU->SetData(gData, sUnitNum);
- dedyGPU->SetZeroAll();
+ xGPU->SetData(xData, unitNum);
+ gGPU->SetData(gData, unitNum);
yGPU->SetZeroAll();
+ dedyGPU->SetZeroAll();
dedxGPU->SetZeroAll();
/* call Sigmoid function */
@@ -202,8 +173,9 @@ bool TestSigmoid2()
SigmoidBackward(gGPU, yGPU, xGPU, dedyGPU, dedxGPU, CROSSENTROPY);
/* check result */
- gpuTest = yGPU->CheckData(yAnswer, sUnitNum) && dedxGPU->CheckData(dedxAnswer, sUnitNum);
+ gpuTest = yGPU->CheckData(yAnswer, unitNum, 1e-4F)
+ && dedxGPU->CheckData(dedxAnswer, unitNum, 1e-4F)
+ && dedyGPU->CheckData(dedyAnswer, unitNum, 1e-4F);
/* destroy variables */
delete x;
delete y;
@@ -215,7 +187,7 @@ bool TestSigmoid2()
delete gGPU;
delete dedxGPU;
delete dedyGPU;
- delete[] sDimSize;
+ delete[] dimSize;
return cpuTest && gpuTest;
#else
@@ -225,7 +197,7 @@ bool TestSigmoid2()
delete g;
delete dedx;
delete dedy;
- delete[] sDimSize;
+ delete[] dimSize;
return cpuTest;
#endif // USE_CUDA
@@ -252,6 +224,16 @@ bool TestSigmoid()
else
XPRINT(0, stdout, ">> case 1 passed!\n");
+ /* case 2 test */
+ caseFlag = TestSigmoid2();
+ if (!caseFlag) {
+ returnFlag = false;
+ XPRINT(0, stdout, ">> case 2 failed!\n");
+ }
+ else
+ XPRINT(0, stdout, ">> case 2 passed!\n");
/* other cases test */
/*
TODO!!
......
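The refreshed expected values in the TSigmoid.cpp hunks above follow from the formulas in the comment block: y = 1/(1+exp(-x)), dE/dy = -g/y for the cross-entropy loss (consistent with the dedyAnswer values), and dE/dx = dE/dy * y * (1 - y). A small stand-alone check (plain C++, illustrative only, not part of the test code):

    #include <cmath>
    #include <cstdio>

    /* hedged sketch: reproduce yAnswer, dedyAnswer and dedxAnswer of TestSigmoid2 */
    int main()
    {
        float x[3] = {0.0F, 1.0F, 2.0F};
        float g[3] = {0.4F, 0.8F, 1.0F};
        for (int i = 0; i < 3; i++) {
            float y    = 1.0F / (1.0F + std::exp(-x[i]));    /* 0.5, 0.7311, 0.8808 */
            float dedy = -g[i] / y;                          /* -0.8, -1.0943, -1.1353 */
            float dedx = dedy * y * (1.0F - y);              /* -0.2, -0.2151, -0.1192 */
            std::printf("%.4f %.4f %.4f\n", y, dedy, dedx);
        }
        return 0;
    }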
@@ -31,68 +31,69 @@ softmax function: y = e^x / \sum_{i} e^{x_i}
*/
bool TestSoftmax1()
{
- /* a input tensor of size (2, 3) */
- int sOrder = 2;
- int * sDimSize = new int[sOrder];
- sDimSize[0] = 2;
- sDimSize[1] = 3;
- int sUnitNum = 1;
- for (int i = 0; i < sOrder; i++)
- sUnitNum *= sDimSize[i];
+ /* a tensor of size (2, 3) */
+ int order = 2;
+ int * dimSize = new int[order];
+ dimSize[0] = 2;
+ dimSize[1] = 3;
+ int unitNum = 1;
+ for (int i = 0; i < order; i++)
+ unitNum *= dimSize[i];
DTYPE xData[2][3] = { {0.0F, 1.0F, 2.0F},
{0.5F, 0.7F, 1.4F} };
- DTYPE answer[2][3] = { {0.09003057F, 0.24472848F, 0.66524094F},
- {0.21362929F, 0.2609274F , 0.52544326F} };
+ DTYPE answer[2][3] = { {0.0900F, 0.2447F, 0.6652F},
+ {0.2136F, 0.2609F, 0.5254F} };
/* CPU test */
bool cpuTest = true;
/* create tensors */
- XTensor * x = NewTensor(sOrder, sDimSize);
- XTensor * y = NewTensor(sOrder, sDimSize);
+ XTensor * x = NewTensor(order, dimSize);
+ XTensor * y = NewTensor(order, dimSize);
/* initialize variables */
- x->SetData(xData, sUnitNum);
+ x->SetData(xData, unitNum);
y->SetZeroAll();
/* call Softmax function */
Softmax(x, y, 1);
/* check result */
- cpuTest = y->CheckData(answer, sUnitNum);
+ cpuTest = y->CheckData(answer, unitNum, 1e-4F);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
- XTensor * xGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
- XTensor * yGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
- xGPU->SetData(xData, sUnitNum);
+ xGPU->SetData(xData, unitNum);
yGPU->SetZeroAll();
/* call Softmax function */
Softmax(xGPU, yGPU, 1);
/* check result */
- gpuTest = yGPU->CheckData(answer, sUnitNum, 0.001F);
+ gpuTest = yGPU->CheckData(answer, unitNum, 1e-4F);
/* destroy variables */
delete x;
delete y;
delete xGPU;
delete yGPU;
- delete[] sDimSize;
+ delete[] dimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
- delete x, y;
- delete[] sDimSize;
+ delete x;
+ delete y;
+ delete[] dimSize;
return cpuTest;
#endif // USE_CUDA
@@ -101,36 +102,38 @@ bool TestSoftmax1()
/*
case 2: test SoftmaxBackward function.
SoftmaxBackward function: dE/dx_j = -gold_j + y_j
+ In this case, LossName=CROSSENTROPY.
*/
bool TestSoftmax2()
{
/* a input tensor of size (2, 3) */
- int sOrder = 2;
- int * sDimSize = new int[sOrder];
- sDimSize[0] = 1;
- sDimSize[1] = 3;
- int sUnitNum = 1;
- for (int i = 0; i < sOrder; i++)
- sUnitNum *= sDimSize[i];
+ int order = 2;
+ int * dimSize = new int[order];
+ dimSize[0] = 1;
+ dimSize[1] = 3;
+ int unitNum = 1;
+ for (int i = 0; i < order; i++)
+ unitNum *= dimSize[i];
DTYPE xData[1][3] = { {0.0F, 1.0F, 2.0F} };
DTYPE gData[1][3] = { {0.0F, 0.0F, 1.0F} };
- DTYPE dedxAnswer[3] = {0.090031F, 0.244728F, -0.334759F};
+ DTYPE yAnswer[1][3] = { {0.0900F, 0.2447F, 0.6652F} };
+ DTYPE dedxAnswer[1][3] = {0.0900F, 0.2447F, -0.3347F};
/* CPU test */
bool cpuTest = true;
/* create tensors */
- XTensor * x = NewTensor(sOrder, sDimSize);
- XTensor * y = NewTensor(sOrder, sDimSize);
- XTensor * g = NewTensor(sOrder, sDimSize);
- XTensor * dedy = NewTensor(sOrder, sDimSize);
- XTensor * dedx = NewTensor(sOrder, sDimSize);
+ XTensor * x = NewTensor(order, dimSize);
+ XTensor * y = NewTensor(order, dimSize);
+ XTensor * g = NewTensor(order, dimSize);
+ XTensor * dedy = NewTensor(order, dimSize);
+ XTensor * dedx = NewTensor(order, dimSize);
/* initialize variables */
- x->SetData(xData, sUnitNum);
- g->SetData(gData, sUnitNum);
+ x->SetData(xData, unitNum);
+ g->SetData(gData, unitNum);
y->SetZeroAll();
dedx->SetZeroAll();
dedy->SetZeroAll();
@@ -138,25 +141,27 @@ bool TestSoftmax2()
/* call Softmax function */
Softmax(x, y, 1);
+ /* call SoftmaxBackward function */
SoftmaxBackward(g, y, x, dedy, dedx, 1, CROSSENTROPY);
/* check result */
- cpuTest = dedx->CheckData(dedxAnswer, sUnitNum, 0.001F);
+ cpuTest = y->CheckData(yAnswer, unitNum, 1e-4F)
+ && dedx->CheckData(dedxAnswer, unitNum, 1e-4F);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
- XTensor * xGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
- XTensor * yGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
- XTensor * gGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
- XTensor * dedyGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
- XTensor * dedxGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * gGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * dedyGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * dedxGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
- xGPU->SetData(xData, sUnitNum);
- gGPU->SetData(gData, sUnitNum);
+ xGPU->SetData(xData, unitNum);
+ gGPU->SetData(gData, unitNum);
yGPU->SetZeroAll();
dedxGPU->SetZeroAll();
dedyGPU->SetZeroAll();
@@ -168,7 +173,8 @@ bool TestSoftmax2()
SoftmaxBackward(gGPU, yGPU, xGPU, dedyGPU, dedxGPU, 1, CROSSENTROPY);
/* check result */
- gpuTest = dedxGPU->CheckData(dedxAnswer, sUnitNum, 0.001F);
+ gpuTest = yGPU->CheckData(yAnswer, unitNum, 1e-4F)
+ && dedxGPU->CheckData(dedxAnswer, unitNum, 1e-4F);
/* destroy variables */
delete x;
@@ -181,7 +187,7 @@ bool TestSoftmax2()
delete gGPU;
delete dedxGPU;
delete dedyGPU;
- delete[] sDimSize;
+ delete[] dimSize;
return cpuTest && gpuTest;
#else
@@ -191,7 +197,7 @@ bool TestSoftmax2()
delete g;
delete dedx;
delete dedy;
- delete[] sDimSize;
+ delete[] dimSize;
return cpuTest;
#endif // USE_CUDA
......
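Likewise, the softmax expectations above can be reproduced from the formulas quoted in the comments: y_j = e^{x_j} / sum_i e^{x_i} and, for the cross-entropy case, dE/dx_j = -gold_j + y_j. A small stand-alone check (plain C++, illustrative only):

    #include <cmath>
    #include <cstdio>

    /* hedged sketch: reproduce yAnswer and dedxAnswer of TestSoftmax2 */
    int main()
    {
        float x[3]    = {0.0F, 1.0F, 2.0F};
        float gold[3] = {0.0F, 0.0F, 1.0F};
        float e[3], sum = 0.0F;
        for (int i = 0; i < 3; i++) { e[i] = std::exp(x[i]); sum += e[i]; }
        for (int j = 0; j < 3; j++) {
            float y    = e[j] / sum;         /* 0.0900, 0.2447, 0.6652 */
            float dedx = y - gold[j];        /* 0.0900, 0.2447, -0.3347 */
            std::printf("%.4f %.4f\n", y, dedx);
        }
        return 0;
    }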
@@ -181,14 +181,20 @@ bool TestSplit2()
gpuTest = tGPU->CheckData(answer, tUnitNum);
/* destroy variables */
- delete s, t, sGPU, tGPU;
- delete[] sDimSize, tDimSize;
+ delete s;
+ delete t;
+ delete sGPU;
+ delete tGPU;
+ delete[] sDimSize;
+ delete[] tDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
- delete s, t;
- delete[] sDimSize, tDimSize;
+ delete s;
+ delete t;
+ delete[] sDimSize;
+ delete[] tDimSize;
return cpuTest;
#endif // USE_CUDA
@@ -295,14 +301,25 @@ bool TestSplit3()
gpuTest = tGPU1->CheckData(answer1, tUnitNum1) && tGPU2->CheckData(answer2, tUnitNum2);
/* destroy variables */
- delete s, t1, t2, sGPU, tGPU1, tGPU2;
- delete[] sDimSize, tDimSize1, tDimSize2;
+ delete s;
+ delete t1;
+ delete t2;
+ delete sGPU;
+ delete tGPU1;
+ delete tGPU2;
+ delete[] sDimSize;
+ delete[] tDimSize1;
+ delete[] tDimSize2;
return cpuTest && gpuTest;
#else
/* destroy variables */
- delete s, t1, t2;
- delete[] sDimSize, tDimSize1, tDimSize2;
+ delete s;
+ delete t1;
+ delete t2;
+ delete[] sDimSize;
+ delete[] tDimSize1;
+ delete[] tDimSize2;
return cpuTest;
#endif // USE_CUDA
......