Commit 36135d25 by liyinqiao Committed by 李垠桥

Test reduce functions on scalar tensor.

parent 2e1453f4
......@@ -141,6 +141,90 @@ bool TestReduceMax1()
#endif // USE_CUDA
}
/*
case 2: get the max value of the items along a dimension of the scalar tensor.
In this case,
(4) -> scalar, dim = 0
*/
bool TestReduceMax2()
{
    /* a input tensor of size (4) */
    int sOrder = 1;
    int * sDimSize = new int[sOrder];
    sDimSize[0] = 4;

    int sUnitNum = 1;
    for (int i = 0; i < sOrder; i++)
        sUnitNum *= sDimSize[i];

    /* a output scalar tensor (order 0, one element) */
    int tOrder = 0;
    int * tDimSize = new int[MAX_TENSOR_DIM_NUM];
    int tUnitNum = 1;

    DTYPE sData[4] = {0.0F, 5.0F, 2.0F, 3.0F};
    /* max of {0, 5, 2, 3} along dim 0 */
    DTYPE answer[1] = {5.0F};

    /* CPU test */
    bool cpuTest = true;

    /* create tensors */
    XTensor * s = NewTensorV2(sOrder, sDimSize);
    XTensor * t = NewTensorV2(tOrder, tDimSize);
    XTensor tUser;

    /* initialize variables */
    s->SetData(sData, sUnitNum);
    t->SetZeroAll();

    /* call ReduceMax function (both the low-level and the user-level API) */
    _ReduceMax(s, t, 0);
    tUser = ReduceMax(*s, 0);

    /* check results */
    cpuTest = _CheckData(t, answer, tUnitNum) && _CheckData(&tUser, answer, tUnitNum);

#ifdef USE_CUDA
    /* GPU test */
    bool gpuTest = true;

    /* create tensors on device 0 */
    XTensor * sGPU = NewTensorV2(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
    XTensor * tGPU = NewTensorV2(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
    XTensor tUserGPU;

    /* initialize variables
       (fix: the original cleared tGPU twice; a single call suffices) */
    sGPU->SetData(sData, sUnitNum);
    tGPU->SetZeroAll();

    /* call ReduceMax function */
    _ReduceMax(sGPU, tGPU, 0);
    tUserGPU = ReduceMax(*sGPU, 0);

    /* check results */
    gpuTest = _CheckData(tGPU, answer, tUnitNum) && _CheckData(&tUserGPU, answer, tUnitNum);

    /* destroy variables */
    delete s;
    delete t;
    delete sGPU;
    delete tGPU;
    delete[] sDimSize;
    delete[] tDimSize;

    return cpuTest && gpuTest;
#else
    /* destroy variables */
    delete s;
    delete t;
    delete[] sDimSize;
    delete[] tDimSize;

    return cpuTest;
#endif // USE_CUDA
}
/* other cases */
/*
TODO!!
......@@ -161,6 +245,15 @@ bool TestReduceMax()
else
XPRINT(0, stdout, ">> case 1 passed!\n");
/* case 2 test */
caseFlag = TestReduceMax2();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 2 failed!\n");
}
else
XPRINT(0, stdout, ">> case 2 passed!\n");
/* other cases test */
/*
TODO!!
......
......@@ -136,6 +136,85 @@ bool TestReduceMean1()
#endif // USE_CUDA
}
/* case 2: get the mean value along a dimension of the scalar tensor */
bool TestReduceMean2()
{
/* a tensor of size (4) */
int sOrder = 1;
int * sDimSize = new int[sOrder];
sDimSize[0] = 4;
int sUnitNum = 1;
for (int i = 0; i < sOrder; i++)
sUnitNum *= sDimSize[i];
/* a scalar tensor */
int tOrder = 0;
int * tDimSize = new int[MAX_TENSOR_DIM_NUM];
int tUnitNum = 1;
DTYPE sData[4] = {0.0F, 1.0F, 2.0F, 3.0F};
DTYPE answer[1] = {1.5F};
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * s = NewTensorV2(sOrder, sDimSize);
XTensor * t = NewTensorV2(tOrder, tDimSize);
XTensor tUser;
/* initialize variables */
s->SetData(sData, sUnitNum);
t->SetZeroAll();
/* call ReduceMean function */
_ReduceMean(s, t, 0);
tUser = ReduceMean(*s, 0);
/* check results */
cpuTest = _CheckData(t, answer, tUnitNum) && _CheckData(&tUser, answer, tUnitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensor */
XTensor * sGPU = NewTensorV2(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * tGPU = NewTensorV2(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
XTensor tUserGPU;
/* Initialize variables */
sGPU->SetData(sData, sUnitNum);
tGPU->SetZeroAll();
/* call ReduceMean function */
_ReduceMean(sGPU, tGPU, 0);
tUserGPU = ReduceMean(*sGPU, 0);
/* check results */
gpuTest = _CheckData(tGPU, answer, tUnitNum) && _CheckData(&tUserGPU, answer, tUnitNum);
/* destroy variables */
delete s;
delete t;
delete sGPU;
delete tGPU;
delete[] sDimSize;
delete[] tDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete s;
delete t;
delete[] sDimSize;
delete[] tDimSize;
return cpuTest;
#endif // USE_CUDA
}
/* other cases */
/*
TODO!!
......@@ -156,6 +235,15 @@ bool TestReduceMean()
else
XPRINT(0, stdout, ">> case 1 passed!\n");
/* case 2 test */
caseFlag = TestReduceMean2();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 2 failed!\n");
}
else
XPRINT(0, stdout, ">> case 2 passed!\n");
///* other cases test */
///*
//TODO!!
......
......@@ -240,6 +240,104 @@ bool TestReduceSumSquared2()
#endif // USE_CUDA
}
/*
case 3: squared sum of the items along a dimension of the scalar tensor.
For a 1-dimensional data array a, sum = \sum_i (a_i - shift)^2.
In this case, (4) -> scalar, dim = 0.
*/
bool TestReduceSumSquared3()
{
/* a input tensor of size (4) */
int sOrder = 1;
int * sDimSize = new int[sOrder];
sDimSize[0] = 4;
int sUnitNum = 1;
for (int i = 0; i < sOrder; i++)
sUnitNum *= sDimSize[i];
/* a output scalar tensor */
int tOrder = 0;
int * tDimSize = new int[MAX_TENSOR_DIM_NUM];
int tUnitNum = 1;
/* a shift tensor of size (1) */
int shiftOrder = 0;
int * shiftDimSize = new int[MAX_TENSOR_DIM_NUM];
int shiftUnitNum = 1;
DTYPE sData[4] = {0.0F, 1.0F, 2.0F, 3.0F};
DTYPE shiftData[1] = {-1.0F};
DTYPE answer[1] = {30.0F};
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * s = NewTensorV2(sOrder, sDimSize);
XTensor * t = NewTensorV2(tOrder, tDimSize);
XTensor * shift = NewTensorV2(shiftOrder, shiftDimSize);
XTensor tUser;
/* initialize variables */
s->SetData(sData, sUnitNum);
shift->SetData(shiftData, shiftUnitNum);
t->SetZeroAll();
/* call ReduceSumSquared function */
_ReduceSumSquared(s, t, 0, shift);
tUser = ReduceSumSquared(*s, 0, *shift);
/* check results */
cpuTest = _CheckData(t, answer, tUnitNum) && _CheckData(&tUser, answer, tUnitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
XTensor * sGPU = NewTensorV2(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * tGPU = NewTensorV2(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
XTensor * shiftGPU = NewTensorV2(shiftOrder, shiftDimSize, X_FLOAT, 1.0F, 0);
XTensor tUserGPU;
/* initialize variables */
sGPU->SetData(sData, sUnitNum);
shiftGPU->SetData(shiftData, shiftUnitNum);
tGPU->SetZeroAll();
/* call ReduceSumSquared function */
_ReduceSumSquared(sGPU, tGPU, 0, shiftGPU);
tUserGPU = ReduceSumSquared(*sGPU, 0, *shiftGPU);
/* check results */
gpuTest = _CheckData(tGPU, answer, tUnitNum) && _CheckData(&tUserGPU, answer, tUnitNum);
/* destroy variables */
delete s;
delete t;
delete shift;
delete sGPU;
delete tGPU;
delete shiftGPU;
delete[] sDimSize;
delete[] tDimSize;
delete[] shiftDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete s;
delete t;
delete shift;
delete[] sDimSize;
delete[] tDimSize;
delete[] shiftDimSize;
return cpuTest;
#endif // USE_CUDA
}
/* other cases */
/*
TODO!!
......@@ -264,10 +362,19 @@ bool TestReduceSumSquared()
caseFlag = TestReduceSumSquared2();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 1 failed!\n");
XPRINT(0, stdout, ">> case 2 failed!\n");
}
else
XPRINT(0, stdout, ">> case 1 passed!\n");
XPRINT(0, stdout, ">> case 2 passed!\n");
/* case 3 test */
caseFlag = TestReduceSumSquared3();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 3 failed!\n");
}
else
XPRINT(0, stdout, ">> case 3 passed!\n");
/* other cases test */
/*
......
......@@ -132,6 +132,104 @@ bool TestReduceVariance1()
#endif // USE_CUDA
}
/*
case 2: variance of the items along a dimension of the scalar tensor.
For a 1-dimensional data array a, variance = 1/n * \sum_i (a_i - mean)^2.
In this case, (4) -> scalar, dim = 0.
*/
bool TestReduceVariance2()
{
/* a input tensor of size (4) */
int sOrder = 1;
int * sDimSize = new int[sOrder];
sDimSize[0] = 4;
int sUnitNum = 1;
for (int i = 0; i < sOrder; i++)
sUnitNum *= sDimSize[i];
/* a output scalar tensor */
int tOrder = 0;
int * tDimSize = new int[MAX_TENSOR_DIM_NUM];
int tUnitNum = 1;
/* a mean scalar tensor */
int meanOrder = 0;
int * meanDimSize = new int[MAX_TENSOR_DIM_NUM];
int meanUnitNum = 1;
DTYPE sData[4] = {0.0F, 1.0F, 2.0F, 3.0F};
DTYPE meanData[1] = {1.5F};
DTYPE answer[1] = {1.25F};
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * s = NewTensorV2(sOrder, sDimSize);
XTensor * t = NewTensorV2(tOrder, tDimSize);
XTensor * mean = NewTensorV2(meanOrder, meanDimSize);
XTensor tUser;
/* initialize variables */
s->SetData(sData, sUnitNum);
mean->SetData(meanData, meanUnitNum);
t->SetZeroAll();
/* call ReduceVariance function */
_ReduceVariance(s, t, 0, mean);
tUser = ReduceVariance(*s, 0, *mean);
/* check results */
cpuTest = _CheckData(t, answer, tUnitNum) && _CheckData(&tUser, answer, tUnitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
XTensor * sGPU = NewTensorV2(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * tGPU = NewTensorV2(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
XTensor * meanGPU = NewTensorV2(meanOrder, meanDimSize, X_FLOAT, 1.0F, 0);
XTensor tUserGPU;
/* initialize variables */
sGPU->SetData(sData, sUnitNum);
meanGPU->SetData(meanData, meanUnitNum);
tGPU->SetZeroAll();
/* call ReduceVariance function */
_ReduceVariance(sGPU, tGPU, 0, meanGPU);
tUserGPU = ReduceVariance(*sGPU, 0, *meanGPU);
/* check results */
gpuTest = _CheckData(tGPU, answer, tUnitNum) && _CheckData(&tUserGPU, answer, tUnitNum);
/* destroy variables */
delete s;
delete t;
delete mean;
delete sGPU;
delete tGPU;
delete meanGPU;
delete[] sDimSize;
delete[] tDimSize;
delete[] meanDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete s;
delete t;
delete mean;
delete[] sDimSize;
delete[] tDimSize;
delete[] meanDimSize;
return cpuTest;
#endif // USE_CUDA
}
/* other cases */
/*
TODO!!
......@@ -152,6 +250,15 @@ bool TestReduceVariance()
else
XPRINT(0, stdout, ">> case 1 passed!\n");
/* case 2 test */
caseFlag = TestReduceVariance2();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 2 failed!\n");
}
else
XPRINT(0, stdout, ">> case 2 passed!\n");
/* other cases test */
/*
TODO!!
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论