Commit 42f995ae by liyinqiao

Bug fixed in tests.

parent 100f4611
......@@ -377,7 +377,7 @@ void LossBackward(XTensor * dedy, XTensor * t, XTensor * y,
CheckNTErrors((tLen < y->unitNum), "Illegal input length!");
CheckNTErrors((XTensor::IsIdentical(t, y)&& XTensor::IsIdentical(dedy, y)),
"The input tensors must be of the same size!");
//CheckNTErrors((t->dimSizeRDI[0] == 1 && y->dimSizeRDI[0] == 1 && dedy->dimSizeRDI[0] == 1), "TODO!");
CheckNTErrors((t->dimSizeRDI[0] == 1 && y->dimSizeRDI[0] == 1 && dedy->dimSizeRDI[0] == 1), "TODO!");
CheckNTErrors((t->order > leadDim && leadDim >= 0), "Illegal leading dimension!");
CheckNTErrors((t->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE),
"TODO!");
......
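/* Note: the change above re-enables the previously commented-out dimension
   check, so LossBackward now rejects inputs whose innermost dimension is
   not 1. A hypothetical helper stating that invariant, assuming dimSizeRDI
   holds dimension sizes in reverse (rightmost-first) order: */
inline bool LastDimIsOne(const XTensor * a)
{
    return a->dimSizeRDI[0] == 1;
}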
......@@ -19,23 +19,19 @@
* $Created by: Lin Ye (email: linye2015@outlook.com) 2018-06-14
*/
#include "../XTensor.h"
#include "../XDevice.h"
#include "../core/ConcatenateSolely.h"
#include "TConcatenateSolely.h"
#include "../XList.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1: concatenate a list of tensors along a given dimension
* In this case, 2 * (2 * 1) -> (2 * 2), dim=1.
* In this case, 2 * (2, 1) -> (2, 2), dim=1.
*/
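/* For reference, a minimal standalone sketch of what this case expects
   (illustrative C++, not the library API): two (2, 1) column tensors
   concatenated along dim 1 form a (2, 2) tensor. */
void NaiveConcatDim1(const float a[2][1], const float b[2][1], float c[2][2])
{
    for (int i = 0; i < 2; i++) {
        c[i][0] = a[i][0];  /* column 0 comes from the first source */
        c[i][1] = b[i][0];  /* column 1 comes from the second source */
    }
}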
bool TestConcatenateSolely1()
{
/* create list */
XList sList;
sList = XList();
XList * sList = new XList();
/* a source tensor of size 2 * 1 */
/* a source tensor of size (2, 1) */
int sOrder1 = 2;
int * sDimSize1 = new int[sOrder1];
sDimSize1[0] = 2;
......@@ -45,7 +41,7 @@ bool TestConcatenateSolely1()
for (int i = 0; i < sOrder1; i++)
sUnitNum1 *= sDimSize1[i];
/* a source tensor of size 2 * 1 */
/* a source tensor of size (2, 1) */
int sOrder2 = 2;
int * sDimSize2 = new int[sOrder2];
sDimSize2[0] = 2;
......@@ -55,7 +51,7 @@ bool TestConcatenateSolely1()
for (int i = 0; i < sOrder2; i++)
sUnitNum2 *= sDimSize2[i];
/* a target tensor of size 2 * 2 */
/* a target tensor of size (2, 2) */
int tOrder = 2;
int * tDimSize = new int[tOrder];
tDimSize[0] = 2;
......@@ -86,11 +82,11 @@ bool TestConcatenateSolely1()
t->SetZeroAll();
/* add tensors to list */
sList.Add(s1);
sList.Add(s2);
sList->Add(s1);
sList->Add(s2);
/* call concatenatesolely function */
ConcatenateSolely(&sList, t, 1);
/* call ConcatenateSolely function */
ConcatenateSolely(sList, t, 1);
/* check results */
cpuTest = t->CheckData(answer, tUnitNum);
......@@ -99,9 +95,6 @@ bool TestConcatenateSolely1()
/* GPU test */
bool gpuTest = true;
/* clear list */
sList.Clear();
/* create tensor */
XTensor * sGPU1 = NewTensor(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
XTensor * sGPU2 = NewTensor(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
......@@ -111,40 +104,56 @@ bool TestConcatenateSolely1()
sGPU1->SetData(sData1, sUnitNum1);
sGPU2->SetData(sData2, sUnitNum2);
tGPU->SetZeroAll();
/* clear list */
sList->Clear();
/* add tensors to list */
sList.Add(sGPU1);
sList.Add(sGPU2);
sList->Add(sGPU1);
sList->Add(sGPU2);
/* call concatenatesolely function */
ConcatenateSolely(&sList, tGPU, 1);
/* call ConcatenateSolely function */
ConcatenateSolely(sList, tGPU, 1);
/* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum);
/* destroy variables */
delete s1, s2, t, sGPU1, sGPU2, tGPU;
delete[] sDimSize1, sDimSize2, tDimSize;
delete sList;
delete s1;
delete s2;
delete t;
delete sGPU1;
delete sGPU2;
delete tGPU;
delete[] sDimSize1;
delete[] sDimSize2;
delete[] tDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete s1, s2, t;
delete[] sDimSize1, sDimSize2, tDimSize;
/* destroy variables */
delete sList;
delete s1;
delete s2;
delete t;
delete[] sDimSize1;
delete[] sDimSize2;
delete[] tDimSize;
return cpuTest;
#endif // USE_CUDA
}
/* case 2: concatenate a list of tensors along a given dimension
* In this case, 2 * (2 * 1) -> (4 * 1), dim=0.
* In this case, 2 * (2, 1) -> (4, 1), dim=0.
*/
bool TestConcatenateSolely2()
{
/* create list */
XList sList;
sList = XList();
/* a source tensor of size 2 * 1 */
XList * sList = new XList();
/* a source tensor of size (2, 1) */
int sOrder1 = 2;
int * sDimSize1 = new int[sOrder1];
sDimSize1[0] = 2;
......@@ -154,7 +163,7 @@ bool TestConcatenateSolely2()
for (int i = 0; i < sOrder1; i++)
sUnitNum1 *= sDimSize1[i];
/* a source tensor of size 2 * 1 */
/* a source tensor of size (2, 1) */
int sOrder2 = 2;
int * sDimSize2 = new int[sOrder2];
sDimSize2[0] = 2;
......@@ -164,7 +173,7 @@ bool TestConcatenateSolely2()
for (int i = 0; i < sOrder2; i++)
sUnitNum2 *= sDimSize2[i];
/* a target tensor of size 4 * 1 */
/* a target tensor of size (4, 1) */
int tOrder = 2;
int * tDimSize = new int[tOrder];
tDimSize[0] = 4;
......@@ -197,11 +206,11 @@ bool TestConcatenateSolely2()
t->SetZeroAll();
/* add tensors to list */
sList.Add(s1);
sList.Add(s2);
sList->Add(s1);
sList->Add(s2);
/* call concatenatesolely function */
ConcatenateSolely(&sList, t, 0);
/* call ConcatenateSolely function */
ConcatenateSolely(sList, t, 0);
/* check results */
cpuTest = t->CheckData(answer, tUnitNum);
......@@ -210,9 +219,6 @@ bool TestConcatenateSolely2()
/* GPU test */
bool gpuTest = true;
/* clear list */
sList.Clear();
/* create tensor */
XTensor * sGPU1 = NewTensor(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
XTensor * sGPU2 = NewTensor(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
......@@ -222,40 +228,56 @@ bool TestConcatenateSolely2()
sGPU1->SetData(sData1, sUnitNum1);
sGPU2->SetData(sData2, sUnitNum2);
tGPU->SetZeroAll();
/* clear list */
sList->Clear();
/* add tensors to list */
sList.Add(sGPU1);
sList.Add(sGPU2);
sList->Add(sGPU1);
sList->Add(sGPU2);
/* call concatenatesolely function */
ConcatenateSolely(&sList, tGPU, 0);
ConcatenateSolely(sList, tGPU, 0);
/* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum);
/* destroy variables */
delete s1, s2, t, sGPU1, sGPU2, tGPU;
delete[] sDimSize1, sDimSize2, tDimSize;
delete sList;
delete s1;
delete s2;
delete t;
delete sGPU1;
delete sGPU2;
delete tGPU;
delete[] sDimSize1;
delete[] sDimSize2;
delete[] tDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete s1, s2, t;
delete[] sDimSize1, sDimSize2, tDimSize;
/* destroy variables */
delete sList;
delete s1;
delete s2;
delete t;
delete[] sDimSize1;
delete[] sDimSize2;
delete[] tDimSize;
return cpuTest;
#endif // USE_CUDA
}
/* case 3: concatenate a list of tensors along a given dimension
* In this case, (2 * 1) + (2 * 2) -> (2 * 3), dim=1.
* In this case, (2, 1) + (2, 2) -> (2, 3), dim=1.
*/
bool TestConcatenateSolely3()
{
/* create list */
XList sList;
sList = XList();
/* a source tensor of size (2 * 1) */
XList * sList = new XList();
/* a source tensor of size (2, 1) */
int sOrder1 = 2;
int * sDimSize1 = new int[sOrder1];
sDimSize1[0] = 2;
......@@ -265,7 +287,7 @@ bool TestConcatenateSolely3()
for (int i = 0; i < sOrder1; i++)
sUnitNum1 *= sDimSize1[i];
/* a source tensor of size (2 * 2) */
/* a source tensor of size (2, 2) */
int sOrder2 = 2;
int * sDimSize2 = new int[sOrder2];
sDimSize2[0] = 2;
......@@ -275,7 +297,7 @@ bool TestConcatenateSolely3()
for (int i = 0; i < sOrder2; i++)
sUnitNum2 *= sDimSize2[i];
/* a target tensor of size (2 * 3) */
/* a target tensor of size (2, 3) */
int tOrder = 2;
int * tDimSize = new int[tOrder];
tDimSize[0] = 2;
......@@ -306,11 +328,11 @@ bool TestConcatenateSolely3()
t->SetZeroAll();
/* add tensors to list */
sList.Add(s1);
sList.Add(s2);
sList->Add(s1);
sList->Add(s2);
/* call concatenatesolely function */
ConcatenateSolely(&sList, t, 1);
/* call ConcatenateSolely function */
ConcatenateSolely(sList, t, 1);
/* check results */
cpuTest = t->CheckData(answer, tUnitNum);
......@@ -319,9 +341,6 @@ bool TestConcatenateSolely3()
/* GPU test */
bool gpuTest = true;
/* clear list */
sList.Clear();
/* create tensor */
XTensor * sGPU1 = NewTensor(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
XTensor * sGPU2 = NewTensor(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
......@@ -331,26 +350,42 @@ bool TestConcatenateSolely3()
sGPU1->SetData(sData1, sUnitNum1);
sGPU2->SetData(sData2, sUnitNum2);
tGPU->SetZeroAll();
/* clear list */
sList->Clear();
/* add tensors to list */
sList.Add(sGPU1);
sList.Add(sGPU2);
sList->Add(sGPU1);
sList->Add(sGPU2);
/* call concatenatesolely function */
ConcatenateSolely(&sList, tGPU, 1);
/* call ConcatenateSolely function */
ConcatenateSolely(sList, tGPU, 1);
/* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum);
/* destroy variables */
delete s1, s2, t, sGPU1, sGPU2, tGPU;
delete[] sDimSize1, sDimSize2, tDimSize;
delete sList;
delete s1;
delete s2;
delete t;
delete sGPU1;
delete sGPU2;
delete tGPU;
delete[] sDimSize1;
delete[] sDimSize2;
delete[] tDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete s1, s2, t;
delete[] sDimSize1, sDimSize2, tDimSize;
/* destroy variables */
delete sList;
delete s1;
delete s2;
delete t;
delete[] sDimSize1;
delete[] sDimSize2;
delete[] tDimSize;
return cpuTest;
#endif // USE_CUDA
......@@ -365,7 +400,7 @@ TODO!!
extern "C"
bool TestConcatenateSolely()
{
XPRINT(0, stdout, "[TEST CONCATENATESOLELY] -------------\n");
XPRINT(0, stdout, "[TEST CONCATENATESOLELY] concatenate a list of tensors along a given dimension \n");
bool returnFlag = true, caseFlag = true;
/* case 1 test */
......
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27
*/
#include "TCopyIndexed.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1: copy indexed sub-tensors
* In this case, (3, 2, 3) -> (3, 2, 2), dim = 2, indexSize = 2,
* srcIndex = [0, 2], tgtIndex = [0, 1], copyNum = 1.
*/
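/* A standalone sketch of what this case expects from CopyIndexed with
   copyNum = 1: for each index pair, slice srcIndex[k] along dim 2 of the
   source is copied to slice tgtIndex[k] of the target. Plain C++ for this
   fixed shape, not the library implementation: */
void NaiveCopyIndexed(const float s[3][2][3], float t[3][2][2],
                      const int * srcIndex, const int * tgtIndex, int indexSize)
{
    for (int i = 0; i < 3; i++)
        for (int j = 0; j < 2; j++)
            for (int k = 0; k < indexSize; k++)
                t[i][j][tgtIndex[k]] = s[i][j][srcIndex[k]];
}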
bool TestCopyIndexed1()
{
/* an input tensor of size (3, 2, 3) */
int sOrder = 3;
int * sDimSize = new int[sOrder];
sDimSize[0] = 3;
sDimSize[1] = 2;
sDimSize[2] = 3;
int sUnitNum = 1;
for (int i = 0; i < sOrder; i++)
sUnitNum *= sDimSize[i];
/* an output tensor of size (3, 2, 2) */
int tOrder = 3;
int * tDimSize = new int[tOrder];
tDimSize[0] = 3;
tDimSize[1] = 2;
tDimSize[2] = 2;
int tUnitNum = 1;
for (int i = 0; i < tOrder; i++)
tUnitNum *= tDimSize[i];
DTYPE sData[3][2][3] = { { {0.0, -1.0, 2.0},
{2.0, 1.0, 3.0} },
{ {1.0, 2.0, 4.0},
{3.0, 1.0, 2.0}},
{ {-1.0, 3.0, 2.0},
{1.0, -1.0, 0.0} } };
DTYPE answer[3][2][2] = { { {0.0, 2.0},
{2.0, 3.0} },
{ {1.0, 4.0},
{3.0, 2.0}},
{ {-1.0, 2.0},
{1.0, 0.0} } };
int dim = 2;
int indexSize = 2;
int srcIndex[2] = {0, 2};
int tgtIndex[2] = {0, 1};
int copyNum = 1;
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * s = NewTensor(sOrder, sDimSize);
XTensor * t = NewTensor(tOrder, tDimSize);
/* initialize variables */
s->SetData(sData, sUnitNum);
t->SetZeroAll();
/* call CopyIndexed function */
CopyIndexed(s, t, dim, srcIndex, indexSize, tgtIndex, copyNum);
/* check results */
cpuTest = t->CheckData(answer, tUnitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
sGPU->SetData(sData, sUnitNum);
tGPU->SetZeroAll();
/* call CopyIndexed function */
CopyIndexed(sGPU, tGPU, dim, srcIndex, indexSize, tgtIndex, copyNum);
/* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum);
/* destroy variables */
delete s;
delete t;
delete sGPU;
delete tGPU;
delete[] sDimSize;
delete[] tDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete s;
delete t;
delete[] sDimSize;
delete[] tDimSize;
return cpuTest;
#endif // USE_CUDA
}
/* other cases */
/*
TODO!!
*/
/* test for CopyIndexed Function */
extern "C"
bool TestCopyIndexed()
{
XPRINT(0, stdout, "[TEST CopyIndexed] copy indexed sub-tensors \n");
bool returnFlag = true, caseFlag = true;
/* case 1 test */
caseFlag = TestCopyIndexed1();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 1 failed!\n");
}
else
XPRINT(0, stdout, ">> case 1 passed!\n");
/* other cases test */
/*
TODO!!
*/
if (returnFlag) {
XPRINT(0, stdout, ">> All Passed!\n");
}
else
XPRINT(0, stdout, ">> Failed!\n");
XPRINT(0, stdout, "\n");
return returnFlag;
}
} // namespace nts(NiuTrans.Tensor)
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27
*/
#ifndef __TEST_COPYINDEXED_H__
#define __TEST_COPYINDEXED_H__
#include "../core/CopyIndexed.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* test for CopyIndexed Function */
extern "C"
bool TestCopyIndexed();
} // namespace nts(NiuTrans.Tensor)
#endif // __TEST_COPYINDEXED_H__
......@@ -19,26 +19,25 @@
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27
*/
#include "../XTensor.h"
#include "../XUtility.h"
#include "TCopyValues.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1 */
/* case 1: copy tensor s to tensor t */
bool TestCopyValues1()
{
/* a input tensor of size 2 * 4 */
int inputOrder = 2;
int * inputDimSize = new int[inputOrder];
inputDimSize[0] = 2;
inputDimSize[1] = 4;
/* an input tensor of size (2, 4) */
int sOrder = 2;
int * sDimSize = new int[sOrder];
sDimSize[0] = 2;
sDimSize[1] = 4;
int inputUnitNum = 1;
for (int i = 0; i < inputOrder; i++)
inputUnitNum *= inputDimSize[i];
int sUnitNum = 1;
for (int i = 0; i < sOrder; i++)
sUnitNum *= sDimSize[i];
DTYPE inputData[2][4] = { {0.0, 1.0, 2.0, 3.0},
{4.0, 5.0, 6.0, 7.0} };
DTYPE sData[2][4] = { {0.0, 1.0, 2.0, 3.0},
{4.0, 5.0, 6.0, 7.0} };
DTYPE scaleFactor = 2.0;
DTYPE shiftFactor = 0.5;
......@@ -47,51 +46,54 @@ bool TestCopyValues1()
bool cpuTest = true;
/* create tensors */
XTensor * input = NewTensor(inputOrder, inputDimSize);
XTensor * output = NewTensor(inputOrder, inputDimSize);
XTensor * s = NewTensor(sOrder, sDimSize);
XTensor * t = NewTensor(sOrder, sDimSize);
/* initialize variables */
input->SetData(inputData, inputUnitNum);
output->SetZeroAll();
s->SetData(sData, sUnitNum);
t->SetZeroAll();
/* call CopyValues function */
CopyValues(input, output);
CopyValues(s, t);
/* check results */
cpuTest = output->CheckData(input->data, inputUnitNum);
cpuTest = t->CheckData(s->data, sUnitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
XTensor * inputGPU = NewTensor(inputOrder, inputDimSize, X_FLOAT, 1.0F, 0);
XTensor * outputGPU = NewTensor(inputOrder, inputDimSize, X_FLOAT, 1.0F, 0);
XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * tGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
inputGPU->SetData(inputData, inputUnitNum);
outputGPU->SetData(inputData, inputUnitNum);
sGPU->SetData(sData, sUnitNum);
tGPU->SetData(sData, sUnitNum);
/* call CopyValues function */
CopyValues(inputGPU, outputGPU);
CopyValues(sGPU, tGPU);
/* check results */
DTYPE * dataGPU = (DTYPE*)inputGPU->data;
int size = inputUnitNum * inputGPU->unitSize;
DTYPE * dataGPU = (DTYPE*)sGPU->data;
int size = sUnitNum * sGPU->unitSize;
char * dataCPU = new char[size];
XMemCopy(dataCPU, -1, dataGPU, inputGPU->devID, size);
XMemCopy(dataCPU, -1, dataGPU, sGPU->devID, size);
gpuTest = outputGPU->CheckData(dataCPU, inputUnitNum);
gpuTest = tGPU->CheckData(dataCPU, sUnitNum);
/* destroy variables */
delete input, output;
delete inputGPU, outputGPU;
delete[] inputDimSize;
delete s;
delete t;
delete sGPU;
delete tGPU;
delete[] sDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete input, output;
delete[] inputDimSize;
delete s;
delete t;
delete[] sDimSize;
return cpuTest;
#endif // USE_CUDA
......@@ -106,7 +108,7 @@ TODO!!
extern "C"
bool TestCopyValues()
{
XPRINT(0, stdout, "[TEST CopyValues]\n");
XPRINT(0, stdout, "[TEST CopyValues] copy tensor s to tensor t \n");
bool returnFlag = true, caseFlag = true;
/* case 1 test */
......
......@@ -19,10 +19,7 @@
* $Created by: Lin Ye (email: linye2015@outlook.com) 2018-06-20
*/
#include "../XTensor.h"
#include "../XDevice.h"
#include "../function/HardTanH.h"
#include "THardTanH.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1: hard tanh function */
......@@ -68,7 +65,7 @@ bool TestHardTanH1()
HardTanH(x, y);
/* check results */
cpuTest = y->CheckData(answer, yUnitNum);
cpuTest = y->CheckData(answer, yUnitNum, 1e-4F);
#ifdef USE_CUDA
/* GPU test */
......@@ -86,7 +83,7 @@ bool TestHardTanH1()
HardTanH(xGPU, yGPU);
/* check results */
gpuTest = yGPU->CheckData(answer, yUnitNum);
gpuTest = yGPU->CheckData(answer, yUnitNum, 1e-4F);
/* destroy variables */
delete x, y, xGPU, yGPU;
......
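/* The two CheckData calls above now pass an explicit 1e-4F tolerance,
   presumably to absorb float rounding differences (notably on the GPU).
   Assuming the third argument is an absolute element-wise tolerance, a
   hypothetical equivalent (assumes <cmath>) would be: */
bool NearlyEqual(const float * a, const float * b, int num, float tolerance)
{
    for (int i = 0; i < num; i++)
        if (std::fabs(a[i] - b[i]) > tolerance)
            return false;
    return true;
}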
......@@ -19,7 +19,6 @@
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-29
*/
#include "../XTensor.h"
#include "../XUtility.h"
#include "TIdentity.h"
......@@ -110,7 +109,7 @@ bool TestIdentity2()
DTYPE xData[1][3] = { {0.0, 1.0, 2.0} };
DTYPE gData[1][3] = { {0.0, 0.0, 1.0} };
DTYPE answer[3] = {0.090031, 0.244728, -0.334759};
DTYPE dedxAnswer[3] = {0.090031, 0.244728, -0.334759};
/* CPU test */
bool cpuTest = true;
......@@ -132,31 +131,11 @@ bool TestIdentity2()
/* call Identity function */
Identity(x, y);
/* check result */
printf("CPU Test:\n");
printf("Identity Result:");
DTYPE * checkData = (DTYPE*)y->data;
for (int i = 0; i < sUnitNum; i++) {
printf("\t%f", checkData[i]);
}
printf("\n");
/* call IdentityBackward function */
IdentityBackward(g, y, x, dedy, dedx, CROSSENTROPY);
/* check result */
printf("Computer de/dx:");
checkData = (DTYPE*)dedx->data;
for (int i = 0; i < sUnitNum; i++) {
printf("\t%f", checkData[i]);
}
printf("\n");
printf("Real de/dx:");
for (int i = 0; i < sUnitNum; i++) {
printf("\t%f", answer[i]);
}
printf("\n");
cpuTest = dedx->CheckData(dedxAnswer, sUnitNum);
#ifdef USE_CUDA
/* GPU test */
......@@ -179,44 +158,33 @@ bool TestIdentity2()
/* call Identity function */
Identity(xGPU, yGPU);
/* check result */
printf("\nGPU Test:\n");
printf("Identity Result:");
checkData = (DTYPE*)y->data;
for (int i = 0; i < sUnitNum; i++) {
printf("\t%f", checkData[i]);
}
printf("\n");
/* call IdentityBackward function */
IdentityBackward(gGPU, yGPU, xGPU, dedyGPU, dedxGPU, CROSSENTROPY);
/* check result */
printf("Computer de/dx:");
checkData = (DTYPE*)dedxGPU->data;
int size = sUnitNum * dedxGPU->unitSize;
DTYPE * copy = new DTYPE[size];
XMemCopy(copy, -1, checkData, dedxGPU->devID, size);
for (int i = 0; i < sUnitNum; i++) {
printf("\t%f", copy[i]);
}
printf("\n");
printf("Real de/dx:");
for (int i = 0; i < sUnitNum; i++) {
printf("\t%f", answer[i]);
}
printf("\n");
gpuTest = dedxGPU->CheckData(dedxAnswer, sUnitNum);
/* destroy variables */
delete x, y, g, dedx, dedy;
delete xGPU, yGPU, gGPU, dedxGPU, dedyGPU;
delete x;
delete y;
delete g;
delete dedx;
delete dedy;
delete xGPU;
delete yGPU;
delete gGPU;
delete dedxGPU;
delete dedyGPU;
delete[] sDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete x, y, g, dedx, dedy;
delete x;
delete y;
delete g;
delete dedx;
delete dedy;
delete[] sDimSize;
return cpuTest;
......@@ -232,7 +200,7 @@ bool TestIdentity2()
extern "C"
bool TestIdentity()
{
XPRINT(0, stdout, "[TEST Identity] -------------\n");
XPRINT(0, stdout, "[TEST Identity] identity function and its backward computation \n");
bool returnFlag = true, caseFlag = true;
/* case 1 test */
......@@ -245,15 +213,15 @@ bool TestIdentity()
else
XPRINT(0, stdout, ">> case 1 passed!\n");
/* case 2 test */
caseFlag = TestIdentity2();
///* case 2 test */
//caseFlag = TestIdentity2();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 2 failed!\n");
}
else
XPRINT(0, stdout, ">> case 2 passed!\n");
//if (!caseFlag) {
// returnFlag = false;
// XPRINT(0, stdout, ">> case 2 failed!\n");
//}
//else
// XPRINT(0, stdout, ">> case 2 passed!\n");
/* other cases test */
/*
......
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-07-02
*/
#include "../XUtility.h"
#include "TLogSoftmax.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1: test LogSoftmax function.
* LogSoftmax function: y = log(e^x / \sum_{i} e^{x_i})
*/
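/* Sanity check of the expected values: y_i = x_i - log(sum_k e^{x_k}),
   computed per row. For the first row {0, 1, 2}, sum_k e^{x_k} = 11.1073
   and its log is 2.4076, giving {-2.4076, -1.4076, -0.4076} as in answer.
   A hypothetical helper (assumes <cmath>) that reproduces one row: */
void NaiveLogSoftmaxRow(const float * x, float * y, int n)
{
    float sum = 0.0F;
    for (int i = 0; i < n; i++)
        sum += std::exp(x[i]);
    for (int i = 0; i < n; i++)
        y[i] = x[i] - std::log(sum);
}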
bool TestLogSoftmax1()
{
/* an input tensor of size (2, 3) */
int sOrder = 2;
int * sDimSize = new int[sOrder];
sDimSize[0] = 2;
sDimSize[1] = 3;
int sUnitNum = 1;
for (int i = 0; i < sOrder; i++)
sUnitNum *= sDimSize[i];
DTYPE xData[2][3] = { {0.0, 1.0, 2.0},
{0.5, 0.7, 1.4} };
DTYPE answer[2][3] = { {-2.4076, -1.4076, -0.4076},
{-1.5435, -1.3435, -0.6435} };
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * x = NewTensor(sOrder, sDimSize);
XTensor * y = NewTensor(sOrder, sDimSize);
/* initialize variables */
x->SetData(xData, sUnitNum);
y->SetZeroAll();
/* call LogSoftmax function */
LogSoftmax(x, y, 1);
/* check result */
cpuTest = y->CheckData(answer, sUnitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
XTensor * xGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * yGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
xGPU->SetData(xData, sUnitNum);
yGPU->SetZeroAll();
/* call LogSoftmax function */
LogSoftmax(xGPU, yGPU, 1);
/* check result */
gpuTest = yGPU->CheckData(answer, sUnitNum);
/* destroy variables */
delete x;
delete y;
delete xGPU;
delete yGPU;
delete[] sDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete x;
delete y;
delete[] sDimSize;
return cpuTest;
#endif // USE_CUDA
}
/* case 2: test LogSoftmaxBackward function.
* dE/dx = dE/dy * dy/dx
* log softmax: y_i = log(e^{x_i} / \sum_{k} e^{x_k})
*/
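/* The dedxAnswer values below match the standard cross-entropy/log-softmax
   gradient dE/dx_i = softmax(x)_i - g_i (equivalently e^{y_i} - g_i): with
   x = {0, 1, 2}, softmax(x) = {0.090031, 0.244728, 0.665241}, and subtracting
   g = {0.5, 0.8, 1.5} gives {-0.409969, -0.555272, -0.834759}. This is an
   observation from the numbers, not a claim about the library internals.
   A hypothetical helper (assumes <cmath>) that reproduces it: */
void NaiveLogSoftmaxGrad(const float * x, const float * g, float * dedx, int n)
{
    float sum = 0.0F;
    for (int i = 0; i < n; i++)
        sum += std::exp(x[i]);
    for (int i = 0; i < n; i++)
        dedx[i] = std::exp(x[i]) / sum - g[i];
}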
bool TestLogSoftmax2()
{
/* an input tensor of size (3) */
int sOrder = 1;
int * sDimSize = new int[sOrder];
sDimSize[0] = 3;
int sUnitNum = 1;
for (int i = 0; i < sOrder; i++)
sUnitNum *= sDimSize[i];
DTYPE xData[3] = {0.0, 1.0, 2.0};
DTYPE gData[3] = {0.5, 0.8, 1.5};
DTYPE yAnswer[3] = {-2.4076, -1.4076, -0.4076};
DTYPE dedxAnswer[3] = {-0.409969, -0.555272, -0.834759};
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * x = NewTensor(sOrder, sDimSize);
XTensor * y = NewTensor(sOrder, sDimSize);
XTensor * g = NewTensor(sOrder, sDimSize);
XTensor * dedy = NewTensor(sOrder, sDimSize);
XTensor * dedx = NewTensor(sOrder, sDimSize);
/* initialize variables */
x->SetData(xData, sUnitNum);
g->SetData(gData, sUnitNum);
y->SetZeroAll();
dedx->SetZeroAll();
dedy->SetZeroAll();
/* call LogSoftmax function */
LogSoftmax(x, y, 0);
/* call LogSoftmaxBackward function */
LogSoftmaxBackward(g, y, x, dedy, dedx, 0, CROSSENTROPY);
/* check result */
cpuTest = y->CheckData(yAnswer, sUnitNum) && dedx->CheckData(dedxAnswer, sUnitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
XTensor * xGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * yGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * gGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * dedyGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * dedxGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
xGPU->SetData(xData, sUnitNum);
gGPU->SetData(gData, sUnitNum);
yGPU->SetZeroAll();
dedxGPU->SetZeroAll();
dedyGPU->SetZeroAll();
/* call LogSoftmax function */
LogSoftmax(xGPU, yGPU, 0);
/* call LogSoftmaxBackward function */
LogSoftmaxBackward(gGPU, yGPU, xGPU, dedyGPU, dedxGPU, 0, CROSSENTROPY);
/* check result */
gpuTest = yGPU->CheckData(yAnswer, sUnitNum) && dedxGPU->CheckData(dedxAnswer, sUnitNum);
/* destroy variables */
delete x;
delete y;
delete g;
delete dedx;
delete dedy;
delete xGPU;
delete yGPU;
delete gGPU;
delete dedxGPU;
delete dedyGPU;
delete[] sDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete x;
delete y;
delete g;
delete dedx;
delete dedy;
delete[] sDimSize;
return cpuTest;
#endif // USE_CUDA
}
/* case 3: test LogSoftmaxBackward function.
* dE/dx = dE/dy * dy/dx
* log softmax: y_i = log(e^{x_i} / \sum_{k} e^{x_k})
*/
bool TestLogSoftmax3()
{
/* a tensor of size (1, 3) */
int sOrder = 2;
int * sDimSize = new int[sOrder];
sDimSize[0] = 1;
sDimSize[1] = 3;
int sUnitNum = 1;
for (int i = 0; i < sOrder; i++)
sUnitNum *= sDimSize[i];
DTYPE xData[1][3] = { {0.0, 1.0, 2.0} };
DTYPE gData[1][3] = { {0.5, 0.8, 1.5} };
DTYPE yAnswer[1][3] = { {-2.4076, -1.4076, -0.4076} };
DTYPE dedxAnswer[1][3] = { {-0.409969, -0.555272, -0.834759} };
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * x = NewTensor(sOrder, sDimSize);
XTensor * y = NewTensor(sOrder, sDimSize);
XTensor * g = NewTensor(sOrder, sDimSize);
XTensor * dedy = NewTensor(sOrder, sDimSize);
XTensor * dedx = NewTensor(sOrder, sDimSize);
/* initialize variables */
x->SetData(xData, sUnitNum);
g->SetData(gData, sUnitNum);
y->SetZeroAll();
dedx->SetZeroAll();
dedy->SetZeroAll();
/* call LogSoftmax function */
LogSoftmax(x, y, 1);
/* call LogSoftmaxBackward function */
LogSoftmaxBackward(g, y, x, dedy, dedx, 1, CROSSENTROPY);
/* check result */
cpuTest = y->CheckData(yAnswer, sUnitNum) && dedx->CheckData(dedxAnswer, sUnitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
XTensor * xGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * yGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * gGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * dedyGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * dedxGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
xGPU->SetData(xData, sUnitNum);
gGPU->SetData(gData, sUnitNum);
yGPU->SetZeroAll();
dedxGPU->SetZeroAll();
dedyGPU->SetZeroAll();
/* call LogSoftmax function */
LogSoftmax(xGPU, yGPU, 1);
/* call LogSoftmaxBackward function */
LogSoftmaxBackward(gGPU, yGPU, xGPU, dedyGPU, dedxGPU, 1, CROSSENTROPY);
/* check result */
gpuTest = yGPU->CheckData(yAnswer, sUnitNum) && dedxGPU->CheckData(dedxAnswer, sUnitNum);
/* destroy variables */
delete x;
delete y;
delete g;
delete dedx;
delete dedy;
delete xGPU;
delete yGPU;
delete gGPU;
delete dedxGPU;
delete dedyGPU;
delete[] sDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete x;
delete y;
delete g;
delete dedx;
delete dedy;
delete[] sDimSize;
return cpuTest;
#endif // USE_CUDA
}
/* other cases */
/*
TODO!!
*/
/* test for LogSoftmax Function */
extern "C"
bool TestLogSoftmax()
{
XPRINT(0, stdout, "[TEST LogSoftmax] test log softmax function and its backward computation \n");
bool returnFlag = true, caseFlag = true;
/* case 1 test */
caseFlag = TestLogSoftmax1();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 1 failed!\n");
}
else
XPRINT(0, stdout, ">> case 1 passed!\n");
///* case 2 test */
//caseFlag = TestLogSoftmax2();
//if (!caseFlag) {
// returnFlag = false;
// XPRINT(0, stdout, ">> case 2 failed!\n");
//}
//else
// XPRINT(0, stdout, ">> case 2 passed!\n");
/* case 3 test */
caseFlag = TestLogSoftmax3();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 3 failed!\n");
}
else
XPRINT(0, stdout, ">> case 3 passed!\n");
/* other cases test */
/*
TODO!!
*/
if (returnFlag) {
XPRINT(0, stdout, ">> All Passed!\n");
}
else
XPRINT(0, stdout, ">> Failed!\n");
XPRINT(0, stdout, "\n");
return returnFlag;
}
} // namespace nts(NiuTrans.Tensor)
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-07-02
*/
#ifndef __TEST_LOGSOFTMAX_H__
#define __TEST_LOGSOFTMAX_H__
#include "../function/LogSoftmax.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* test for LogSoftmax Function */
extern "C"
bool TestLogSoftmax();
} // namespace nts(NiuTrans.Tensor)
#endif // __TEST_LOGSOFTMAX_H__
......@@ -19,91 +19,240 @@
* $Created by: LI Yinqiao (email: li.yin.qiao.2012@hotmail.com) 2018-04-30
*/
#include "../XTensor.h"
#include "../XDevice.h"
#include "../core/ScaleAndShift.h"
#include "../function/Loss.h"
namespace nts { // namespace nt(NiuTrans.Tensor)
/* case 1 */
namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1: test LossCompute function
* In this case, Loss function name = SQUAREDERROR.
* loss = sum_{i} 0.5*(t_i - y_i)^2,
* where t_i is the gold standard and y_i is the model output
*/
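/* Worked check: output is filled with 1 and gold with 2 (via ScaleAndShift
   below), so each of the 10 elements contributes 0.5 * (2 - 1)^2 = 0.5 and
   the expected loss is 10 * 0.5 = 5.0, matching answer. */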
bool TestLoss1()
{
/* a tensor of size 10000 * 1 */
/* a tensor of size (10, 1) */
int order = 2;
int * dimSize = new int[order];
dimSize[0] = 10;
dimSize[1] = 1;
int unitNum = 1;
for (int i = 0; i < order; i++)
unitNum *= dimSize[i];
/* CPU test */
bool cpuTest = true;
DTYPE answer = 5.0F;
/* create tensors */
XTensor * output = NewTensor(order, dimSize);
XTensor * gold = NewTensor(order, dimSize);
/* initialize variables */
output->SetZeroAll();
gold->SetZeroAll();
ScaleAndShift(output, 1, 1);
ScaleAndShift(gold, 1, 2);
DTYPE error;
error = LossCompute(gold, output, SQUAREDERROR, false, 0, 0, dimSize[0], 0);
/* check results */
cpuTest = (error == answer);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensor */
XTensor * outputGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * goldGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
/* Initialize variables */
outputGPU->SetZeroAll();
goldGPU->SetZeroAll();
ScaleAndShift(outputGPU, 1, 1);
ScaleAndShift(goldGPU, 1, 2);
/* call LossCompute function */
error = LossCompute(goldGPU, outputGPU, SQUAREDERROR, false, 0, 0, dimSize[0], 0);
/* check results */
gpuTest = (error == answer);
/* destroy variables */
delete output;
delete gold;
delete outputGPU;
delete goldGPU;
delete[] dimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete output;
delete gold;
delete[] dimSize;
return cpuTest;
#endif // USE_CUDA
}
/* case 2: test LossCompute function
* In this case, Loss function name = CROSSENTROPY.
* loss = sum_{i} (-t_i * log(y_i))
* where t_i is the gold standard and y_i is the model output
*/
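/* Worked check: output is filled with 1 and gold with 2, so each element
   contributes -2 * log(1) = 0 and the expected loss is 0.0, matching answer. */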
bool TestLoss2()
{
/* a tensor of size (10, 1) */
int order = 2;
int * dimSize = new int[order];
dimSize[0] = 10;
dimSize[1] = 1;
int unitNum = 1;
for (int i = 0; i < order; i++)
unitNum *= dimSize[i];
/* CPU test */
bool cpuTest = true;
DTYPE answer = 0.0F;
/* create tensors */
XTensor * output = NewTensor(order, dimSize);
XTensor * gold = NewTensor(order, dimSize);
/* initialize variables */
output->SetZeroAll();
gold->SetZeroAll();
ScaleAndShift(output, 1, 1);
ScaleAndShift(gold, 1, 2);
DTYPE error;
error = LossCompute(gold, output, CROSSENTROPY, false, 0, 0, dimSize[0], 0);
/* check results */
cpuTest = (error == answer);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensor */
XTensor * outputGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * goldGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
/* Initialize variables */
outputGPU->SetZeroAll();
goldGPU->SetZeroAll();
ScaleAndShift(outputGPU, 1, 1);
ScaleAndShift(goldGPU, 1, 2);
/* call LossCompute function */
error = LossCompute(goldGPU, outputGPU, CROSSENTROPY, false, 0, 0, dimSize[0], 0);
/* check results */
gpuTest = (error == answer);
/* destroy variables */
delete output;
delete gold;
delete outputGPU;
delete goldGPU;
delete[] dimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete output;
delete gold;
delete[] dimSize;
return cpuTest;
#endif // USE_CUDA
}
/* case 3: test LossCompute function
* In this case, Loss function name = ONEHOTERROR.
* loss = sum_{i} e_i
* where e_i = 0.5*(t_i - y_i)^2 if t_i = 1, e_i = 0 otherwise
*/
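/* Worked check: only the two rows with gold value 1 contribute, each adding
   0.5 * (1 - 0.5)^2 = 0.125, so the expected loss is 0.25, matching answer. */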
bool TestLoss3()
{
/* a tensor of size (5, 1) */
int order = 2;
int order_reduce = 1;
int * dimSize = new int[order];
dimSize[0] = 10000;
dimSize[0] = 5;
dimSize[1] = 1;
int unitNum = 1;
for (int i = 0; i < order; i++)
unitNum *= dimSize[i];
DTYPE outputData[5][1] = { {0.5},
{0.5},
{0.5},
{0.5},
{0.5} };
DTYPE goldData[5][1] = { {1.0},
{1.0},
{0.0},
{0.0},
{0.0} };
/* CPU test */
bool cpuTest = true;
DTYPE answer = 0.25F;
/* create tensors */
XTensor * a = NewTensor(order, dimSize);
XTensor * b = NewTensor(order, dimSize);
XTensor * output = NewTensor(order, dimSize);
XTensor * gold = NewTensor(order, dimSize);
/* initialize variables */
DTYPE* a_data = (DTYPE*)a->data;
for (int i = 0; i < unitNum; i++)
*a_data++ = 1;
DTYPE* b_data = (DTYPE*)b->data;
for (int i = 0; i < unitNum; i++)
*b_data++ = 1;
DTYPE error = 0.0F;
error = LossCompute(a, b, SQUAREDERROR, false, 1, 0, dimSize[0], 0);
printf("%d", error);
/* call reduce max function */
//ReduceMax(a, reduce_a, 0);
//ReduceMax(b, reduce_b, 1);
//DTYPE* reduce_a_data = (DTYPE*)reduce_a->data;
//for (int i = 0; i < unitNum_a; i++)
// printf("%f ", *reduce_a_data++);
//printf("\n");
//DTYPE* reduce_b_data = (DTYPE*)reduce_b->data;
//for (int i = 0; i < unitNum_b; i++)
// printf("%f ", *reduce_b_data++);
output->SetData(outputData, unitNum);
gold->SetData(goldData, unitNum);
DTYPE error;
error = LossCompute(gold, output, ONEHOTERROR, false, 0, 0, dimSize[0], 0);
/* check results */
cpuTest = true;
cpuTest = (error == answer);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensor */
XTensor * aGPU = NewTensor(order, dimSize, X_FLOAT);
XTensor * bGPU = NewTensor(order, dimSize, X_FLOAT);
XTensor * outputGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * goldGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
/* Initialize variables */
DTYPE* aGPU_data = (DTYPE*)aGPU->data;
for (int i = 0; i < unitNum; i++)
*aGPU_data++ = 1;
DTYPE* bGPU_data = (DTYPE*)bGPU->data;
for (int i = 0; i < unitNum; i++)
*bGPU_data++ = 1;
error = LossCompute(a, b, SQUAREDERROR, false, 1, 0, dimSize[0], 0);
printf("%d", error);
/* call reduce max function */
//ReduceMax(aGPU, reduce_aGPU, 0);
//ReduceMax(bGPU, reduce_bGPU, 1);
outputGPU->SetData(outputData, unitNum);
goldGPU->SetData(goldData, unitNum);
/* call LossCompute function */
error = LossCompute(goldGPU, outputGPU, ONEHOTERROR, false, 0, 0, dimSize[0], 0);
/* check results */
gpuTest = true;
gpuTest = (error == answer);
/* destroy variables */
delete aGPU, bGPU;
delete output;
delete gold;
delete outputGPU;
delete goldGPU;
delete[] dimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete a;
delete b;
delete output;
delete gold;
delete[] dimSize;
return cpuTest;
#endif // USE_CUDA
}
......@@ -113,11 +262,11 @@ bool TestLoss1()
TODO!!
*/
/* test for Sum Function */
/* test for Loss Function */
extern "C"
bool TestLoss()
bool TestLoss()
{
XPRINT(0, stdout, "[TEST Loss]\n");
XPRINT(0, stdout, "[TEST Loss] compute the loss \n");
bool returnFlag = true, caseFlag = true;
/* case 1 test */
......@@ -129,6 +278,23 @@ extern "C"
else
XPRINT(0, stdout, ">> case 1 passed!\n");
/* case 2 test */
caseFlag = TestLoss2();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 2 failed!\n");
}
else
XPRINT(0, stdout, ">> case 2 passed!\n");
caseFlag = TestLoss3();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 3 failed!\n");
}
else
XPRINT(0, stdout, ">> case 3 passed!\n");
///* other cases test */
///*
//TODO!!
......@@ -145,4 +311,4 @@ extern "C"
return returnFlag;
}
} // namespace nt(NiuTrans.Tensor)
} // namespace nts(NiuTrans.Tensor)
......@@ -26,9 +26,9 @@
namespace nts { // namespace nts(NiuTrans.Tensor)
/* test for Sum Function */
/* test for Loss Function */
extern "C"
bool TestLoss();
} // namespace nts(NiuTrans.Tensor)
#endif // __TEST_SUM_H__
#endif // __TEST_LOSS_H__
......@@ -19,13 +19,12 @@
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-15
*/
#include "../XTensor.h"
#include "TMatrixMULBatchedCPU.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1: matrix multiplication in batch mode (CPU code).
* In this case, aList=2*(2, 3), bList=2*(2, 3) -> c=2*(2, 2),
transposedA=X_NOTRANS, transposedB=X_NOTRANS.
* In this case, aList=2*(2, 3), bList=2*(3, 2) -> c=2*(2, 2),
* transposedA=X_NOTRANS, transposedB=X_NOTRANS.
*/
bool TestMatrixMulBatchedCPU1()
{
......@@ -110,18 +109,12 @@ bool TestMatrixMulBatchedCPU1()
MatrixMULBatchedCPU(aList, X_NOTRANS, bList, X_NOTRANS, cList);
/* check results */
cpuTest = c1->CheckData(answer1, cUnitNum) && cpuTest;
cpuTest = c2->CheckData(answer2, cUnitNum) && cpuTest;
cpuTest = c1->CheckData(answer1, cUnitNum) && c2->CheckData(answer2, cUnitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* clear list */
aList->Clear();
bList->Clear();
cList->Clear();
/* create tensors */
XTensor * aGPU1 = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
XTensor * aGPU2 = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
......@@ -137,32 +130,56 @@ bool TestMatrixMulBatchedCPU1()
bGPU2->SetData(bData2, aUnitNum);
cGPU1->SetZeroAll();
cGPU2->SetZeroAll();
/* clear list */
aList->Clear();
bList->Clear();
cList->Clear();
/* add tensors to list */
aList->Add(a1);
aList->Add(a2);
bList->Add(b1);
bList->Add(b2);
cList->Add(c1);
cList->Add(c2);
aList->Add(aGPU1);
aList->Add(aGPU2);
bList->Add(bGPU1);
bList->Add(bGPU2);
cList->Add(cGPU1);
cList->Add(cGPU2);
/* call MatrixMULBatchedCPU function */
MatrixMULBatchedCPU(aList, X_NOTRANS, bList, X_NOTRANS, cList);
/* check results */
gpuTest = c1->CheckData(answer1, cUnitNum) && gpuTest;
gpuTest = c2->CheckData(answer2, cUnitNum) && gpuTest;
gpuTest = cGPU1->CheckData(answer1, cUnitNum) && gpuTest;
gpuTest = cGPU2->CheckData(answer2, cUnitNum) && gpuTest;
/* destroy variables */
delete a1, a2, b1, b2, c1, c2;
delete aGPU1, aGPU2, bGPU1, bGPU2, cGPU1, cGPU2;
delete[] aDimSize, bDimSize, cDimSize;
delete a1;
delete a2;
delete b1;
delete b2;
delete c1;
delete c2;
delete aGPU1;
delete aGPU2;
delete bGPU1;
delete bGPU2;
delete cGPU1;
delete cGPU2;
delete[] aDimSize;
delete[] bDimSize;
delete[] cDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete a1, a2, b1, b2, c1, c2;
delete[] aDimSize, bDimSize, cDimSize;
delete a1;
delete a2;
delete b1;
delete b2;
delete c1;
delete c2;
delete[] aDimSize;
delete[] bDimSize;
delete[] cDimSize;
return cpuTest;
#endif // USE_CUDA
......@@ -177,7 +194,7 @@ bool TestMatrixMulBatchedCPU1()
extern "C"
bool TestMatrixMulBatchedCPU()
{
XPRINT(0, stdout, "[TEST MATRIXMULBATCHEDCPU] -------------\n");
XPRINT(0, stdout, "[TEST MATRIXMULBATCHEDCPU] matrix multiplication in batch mode (CPU code) \n");
bool returnFlag = true, caseFlag = true;
/* case 1 test */
......@@ -190,15 +207,6 @@ bool TestMatrixMulBatchedCPU()
else
XPRINT(0, stdout, ">> case 1 passed!\n");
///* case 2 test */
//caseFlag = TestMatrixMulBatchedCPU2();
//if (!caseFlag) {
// returnFlag = false;
// XPRINT(0, stdout, ">> case 2 failed!\n");
//}
//else
// XPRINT(0, stdout, ">> case 2 passed!\n");
/* other cases test */
/*
TODO!!
......
......@@ -19,7 +19,6 @@
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-14
*/
#include "../XTensor.h"
#include "TMatrixMul.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
......@@ -59,13 +58,13 @@ bool TestMatrixMul1()
for (int i = 0; i < tOrder; i++)
tUnitNum *= tDimSize[i];
DTYPE sData1[2][3] = { {1.0F, 2.0F, 3.0F},
{-4.0F, 5.0F, 6.0F} };
DTYPE sData2[3][2] = { {0.0F, -1.0F},
{1.0F, 2.0F},
{2.0F, 1.0F} };
DTYPE answer[2][2] = { {8.0F, 6.0F},
{17.0F, 20.0F} };
DTYPE sData1[2][3] = { {1.0, 2.0, 3.0},
{-4.0, 5.0, 6.0} };
DTYPE sData2[3][2] = { {0.0, -1.0},
{1.0, 2.0},
{2.0, 1.0} };
DTYPE answer[2][2] = { {8.0, 6.0},
{17.0, 20.0} };
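/* worked check: row 1 of answer is 1*0 + 2*1 + 3*2 = 8 and
   1*(-1) + 2*2 + 3*1 = 6; row 2 is -4*0 + 5*1 + 6*2 = 17 and
   -4*(-1) + 5*2 + 6*1 = 20 */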
/* CPU test */
bool cpuTest = true;
......@@ -167,14 +166,14 @@ bool TestMatrixMul2()
for (int i = 0; i < tOrder; i++)
tUnitNum *= tDimSize[i];
DTYPE sData1[3][2] = { {1.0F, -4.0F},
{2.0F, 5.0F},
{3.0F, 6.0F} };
DTYPE sData2[3][2] = { {0.0F, -1.0F},
{1.0F, 2.0F},
{2.0F, 1.0F} };
DTYPE answer[2][2] = { {8.0F, 6.0F},
{17.0F, 20.0F} };
DTYPE sData1[3][2] = { {1.0, -4.0},
{2.0, 5.0},
{3.0, 6.0} };
DTYPE sData2[3][2] = { {0.0, -1.0},
{1.0, 2.0},
{2.0, 1.0} };
DTYPE answer[2][2] = { {8.0, 6.0},
{17.0, 20.0} };
/* CPU test */
bool cpuTest = true;
......@@ -280,30 +279,30 @@ bool TestMatrixMul3()
for (int i = 0; i < tOrder; i++)
tUnitNum *= tDimSize[i];
DTYPE sData1[3][2][3] = { { {0.0F, -1.0F, 2.0},
{2.0F, 1.0F, 3.0} },
{ {1.0F, 2.0F, 4.0},
{3.0F, 1.0F, 2.0}},
{ {-1.0F, 3.0F, 2.0},
{1.0F, -1.0F, 0.0} } };
DTYPE sData2[2][3][2] = { { {1.0F, 2.0F},
{-4.0F, 3.0F},
{2.0F, 6.0F} },
{ {1.0F, 2.0F},
{3.0F, 4.0F},
{5.0F, 6.0F} } };
DTYPE answer[3][2][2][2] = { { { {8.0F, 9.0F},
{4.0F, 25.0F} },
{ {7.0F, 8.0F},
{20.0F, 26.0F} } },
{ { {1.0F, 32.0F},
{3.0F, 21.0F} },
{ {27.0F, 34.0F},
{16.0F, 22.0F} } },
{ { {-9.0F, 19.0F},
{5.0F, -1.0F} },
{ {18.0F, 22.0F},
{-2.0F, -2.0F} } } };
DTYPE sData1[3][2][3] = { { {0.0, -1.0, 2.0},
{2.0, 1.0, 3.0} },
{ {1.0, 2.0, 4.0},
{3.0, 1.0, 2.0}},
{ {-1.0, 3.0, 2.0},
{1.0, -1.0, 0.0} } };
DTYPE sData2[2][3][2] = { { {1.0, 2.0},
{-4.0, 3.0},
{2.0, 6.0} },
{ {1.0, 2.0},
{3.0, 4.0},
{5.0, 6.0} } };
DTYPE answer[3][2][2][2] = { { { {8.0, 9.0},
{4.0, 25.0} },
{ {7.0, 8.0},
{20.0, 26.0} } },
{ { {1.0, 32.0},
{3.0, 21.0} },
{ {27.0, 34.0},
{16.0, 22.0} } },
{ { {-9.0, 19.0},
{5.0, -1.0} },
{ {18.0, 22.0},
{-2.0, -2.0} } } };
/* CPU test */
bool cpuTest = true;
......@@ -407,21 +406,21 @@ bool TestMatrixMul4()
for (int i = 0; i < tOrder; i++)
tUnitNum *= tDimSize[i];
DTYPE sData1[3][2][3] = { { {0.0F, -1.0F, 2.0F},
{2.0F, 1.0F, 3.0F} },
{ {1.0F, 2.0F, 4.0F},
{3.0F, 1.0F, 2.0F}},
{ {-1.0F, 3.0F, 2.0F},
{1.0F, -1.0F, 0.0F} } };
DTYPE sData2[3][2] = { {1.0F, 2.0F},
{3.0F, 4.0F},
{5.0F, 6.0F} };
DTYPE answer[3][2][2] = { { {7.0F, 8.0F},
{20.0F, 26.0F} },
{ {27.0F, 34.0F},
{16.0F, 22.0F} },
{ {18.0F, 22.0F},
{-2.0F, -2.0F} } };
DTYPE sData1[3][2][3] = { { {0.0, -1.0, 2.0},
{2.0, 1.0, 3.0} },
{ {1.0, 2.0, 4.0},
{3.0, 1.0, 2.0}},
{ {-1.0, 3.0, 2.0},
{1.0, -1.0, 0.0} } };
DTYPE sData2[3][2] = { {1.0, 2.0},
{3.0, 4.0},
{5.0, 6.0} };
DTYPE answer[3][2][2] = { { {7.0, 8.0},
{20.0, 26.0} },
{ {27.0, 34.0},
{16.0, 22.0} },
{ {18.0, 22.0},
{-2.0, -2.0} } };
/* CPU test */
bool cpuTest = true;
......
......@@ -19,13 +19,12 @@
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-15
*/
#include "../XTensor.h"
#include "TMatrixMul2D.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1: matrix multiplication (for 2d tensors).
* In this case, a=(2, 3), b=(3, 2) -> c=(2, 2), transposedA=X_NOTRANS,
transposedB=X_NOTRANS.
* In this case, a=(2, 3), b=(3, 2) -> c=(2, 2),
* transposedA=X_NOTRANS, transposedB=X_NOTRANS.
*/
bool TestMatrixMul2D1()
{
......@@ -107,22 +106,33 @@ bool TestMatrixMul2D1()
gpuTest = tGPU->CheckData(answer, tUnitNum);
/* destroy variables */
delete s1, s2, t, sGPU1, sGPU2, tGPU;
delete[] sDimSize1, sDimSize2, tDimSize;
delete s1;
delete s2;
delete t;
delete sGPU1;
delete sGPU2;
delete tGPU;
delete[] sDimSize1;
delete[] sDimSize2;
delete[] tDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete s1, s2, t;
delete[] sDimSize1, sDimSize2, tDimSize;
delete s1;
delete s2;
delete t;
delete[] sDimSize1;
delete[] sDimSize2;
delete[] tDimSize;
return cpuTest;
#endif // USE_CUDA
}
/* case 2: matrix multiplication (for 2d tensors).
* In this case, a=(3, 2), b=(2, 3) -> c=(2, 2), transposedA=X_TRANS,
transposedB=X_NOTRANS.
/* case 2: matrix multiplication (for 2d tensors).
* In this case, a=(3, 2), b=(3, 2) -> c=(2, 2),
* transposedA=X_TRANS, transposedB=X_NOTRANS.
*/
bool TestMatrixMul2D2()
{
......@@ -205,14 +215,25 @@ bool TestMatrixMul2D2()
gpuTest = tGPU->CheckData(answer, tUnitNum);
/* destroy variables */
delete s1, s2, t, sGPU1, sGPU2, tGPU;
delete[] sDimSize1, sDimSize2, tDimSize;
delete s1;
delete s2;
delete t;
delete sGPU1;
delete sGPU2;
delete tGPU;
delete[] sDimSize1;
delete[] sDimSize2;
delete[] tDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete s1, s2, t;
delete[] sDimSize1, sDimSize2, tDimSize;
delete s1;
delete s2;
delete t;
delete[] sDimSize1;
delete[] sDimSize2;
delete[] tDimSize;
return cpuTest;
#endif // USE_CUDA
......@@ -227,7 +248,7 @@ bool TestMatrixMul2D2()
extern "C"
bool TestMatrixMul2D()
{
XPRINT(0, stdout, "[TEST MATRIXMUL2D] -------------\n");
XPRINT(0, stdout, "[TEST MATRIXMUL2D] matrix multiplication (for 2d tensors) \n");
bool returnFlag = true, caseFlag = true;
/* case 1 test */
......
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-07-06
*/
#include "TMatrixMul2DParallel.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1: matrix multiplication (for 2d tensors) with multi-threading.
* In this case, a=(2, 3), b=(3, 2) -> c=(2, 2),
* transposedA=X_NOTRANS, transposedB=X_NOTRANS.
*/
bool TestMatrixMul2DParallel1()
{
/* a source tensor of size (2, 3) */
int sOrder1 = 2;
int * sDimSize1 = new int[sOrder1];
sDimSize1[0] = 2;
sDimSize1[1] = 3;
int sUnitNum1 = 1;
for (int i = 0; i < sOrder1; i++)
sUnitNum1 *= sDimSize1[i];
/* a source tensor of size (3, 2) */
int sOrder2 = 2;
int * sDimSize2 = new int[sOrder2];
sDimSize2[0] = 3;
sDimSize2[1] = 2;
int sUnitNum2 = 1;
for (int i = 0; i < sOrder2; i++)
sUnitNum2 *= sDimSize2[i];
/* a target tensor of size (2, 2) */
int tOrder = 2;
int * tDimSize = new int[tOrder];
tDimSize[0] = 2;
tDimSize[1] = 2;
int tUnitNum = 1;
for (int i = 0; i < tOrder; i++)
tUnitNum *= tDimSize[i];
DTYPE sData1[2][3] = { {1.0, 2.0, 3.0},
{-4.0, 5.0, 6.0} };
DTYPE sData2[3][2] = { {0.0, -1.0},
{1.0, 2.0},
{2.0, 1.0} };
DTYPE answer[2][2] = { {8.0, 6.0},
{17.0, 20.0} };
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * s1 = NewTensor(sOrder1, sDimSize1);
XTensor * s2 = NewTensor(sOrder2, sDimSize2);
XTensor * t = NewTensor(tOrder, tDimSize);
/* initialize variables */
s1->SetData(sData1, sUnitNum1);
s2->SetData(sData2, sUnitNum2);
t->SetZeroAll();
/* call MatrixMul2DParallel function */
MatrixMul2DParallel(s1, X_NOTRANS, s2, X_NOTRANS, t);
/* check results */
cpuTest = t->CheckData(answer, tUnitNum);
/* destroy variables */
delete s1;
delete s2;
delete t;
delete[] sDimSize1;
delete[] sDimSize2;
delete[] tDimSize;
return cpuTest;
}
/* case 2: matrix multiplication (for 2d tensors) with multi-threading.
* In this case, a=(3, 2), b=(3, 2) -> c=(2, 2),
* transposedA=X_TRANS, transposedB=X_NOTRANS.
*/
bool TestMatrixMul2DParallel2()
{
/* a source tensor of size (3, 2) */
int sOrder1 = 2;
int * sDimSize1 = new int[sOrder1];
sDimSize1[0] = 3;
sDimSize1[1] = 2;
int sUnitNum1 = 1;
for (int i = 0; i < sOrder1; i++)
sUnitNum1 *= sDimSize1[i];
/* a source tensor of size (3, 2) */
int sOrder2 = 2;
int * sDimSize2 = new int[sOrder2];
sDimSize2[0] = 3;
sDimSize2[1] = 2;
int sUnitNum2 = 1;
for (int i = 0; i < sOrder2; i++)
sUnitNum2 *= sDimSize2[i];
/* a target tensor of size (2, 2) */
int tOrder = 2;
int * tDimSize = new int[tOrder];
tDimSize[0] = 2;
tDimSize[1] = 2;
int tUnitNum = 1;
for (int i = 0; i < tOrder; i++)
tUnitNum *= tDimSize[i];
DTYPE sData1[3][2] = { {1.0, -4.0},
{2.0, 5.0},
{3.0, 6.0} };
DTYPE sData2[3][2] = { {0.0, -1.0},
{1.0, 2.0},
{2.0, 1.0} };
DTYPE answer[2][2] = { {8.0, 6.0},
{17.0, 20.0} };
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * s1 = NewTensor(sOrder1, sDimSize1);
XTensor * s2 = NewTensor(sOrder2, sDimSize2);
XTensor * t = NewTensor(tOrder, tDimSize);
/* initialize variables */
s1->SetData(sData1, sUnitNum1);
s2->SetData(sData2, sUnitNum2);
t->SetZeroAll();
/* call MatrixMul2DParallel function */
MatrixMul2DParallel(s1, X_TRANS, s2, X_NOTRANS, t);
/* check results */
cpuTest = t->CheckData(answer, tUnitNum);
/* destroy variables */
delete s1;
delete s2;
delete t;
delete[] sDimSize1;
delete[] sDimSize2;
delete[] tDimSize;
return cpuTest;
}
/* other cases */
/*
TODO!!
*/
/* test for MatrixMul2DParallel Function */
extern "C"
bool TestMatrixMul2DParallel()
{
XPRINT(0, stdout, "[TEST MatrixMul2DParallel] matrix multiplication (for 2d tensors) with multi-threading \n");
bool returnFlag = true, caseFlag = true;
/* case 1 test */
caseFlag = TestMatrixMul2DParallel1();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 1 failed!\n");
}
else
XPRINT(0, stdout, ">> case 1 passed!\n");
/* case 2 test */
caseFlag = TestMatrixMul2DParallel2();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 2 failed!\n");
}
else
XPRINT(0, stdout, ">> case 2 passed!\n");
/* other cases test */
/*
TODO!!
*/
if (returnFlag) {
XPRINT(0, stdout, ">> All Passed!\n");
}
else
XPRINT(0, stdout, ">> Failed!\n");
XPRINT(0, stdout, "\n");
return returnFlag;
}
} // namespace nts(NiuTrans.Tensor)
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-07-06
*/
#ifndef __TEST_MATRIXMUL2DPARALLEL_H__
#define __TEST_MATRIXMUL2DPARALLEL_H__
#include "../core/MatrixMul2DParallel.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* test for MatrixMul2DParallel Function */
extern "C"
bool TestMatrixMul2DParallel();
} // namespace nts(NiuTrans.Tensor)
#endif // __TEST_MATRIXMUL2DPARALLEL_H__
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-15
*/
#include "TMatrixMULBatched.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1: matrix multiplication of the two tensors.
* In this case, a=(2, 3), b=(3, 2) -> c=(2, 2),
* transposedA=X_NOTRANS, transposedB=X_NOTRANS.
*/
bool TestMatrixMulBatched1()
{
/* a source tensor of size (2, 3) */
int sOrder1 = 2;
int * sDimSize1 = new int[sOrder1];
sDimSize1[0] = 2;
sDimSize1[1] = 3;
int sUnitNum1 = 1;
for (int i = 0; i < sOrder1; i++)
sUnitNum1 *= sDimSize1[i];
/* a source tensor of size (3, 2) */
int sOrder2 = 2;
int * sDimSize2 = new int[sOrder2];
sDimSize2[0] = 3;
sDimSize2[1] = 2;
int sUnitNum2 = 1;
for (int i = 0; i < sOrder2; i++)
sUnitNum2 *= sDimSize2[i];
/* a target tensor of size (2, 2) */
int tOrder = 2;
int * tDimSize = new int[tOrder];
tDimSize[0] = 2;
tDimSize[1] = 2;
int tUnitNum = 1;
for (int i = 0; i < tOrder; i++)
tUnitNum *= tDimSize[i];
DTYPE sData1[2][3] = { {1.0, 2.0, 3.0},
{-4.0, 5.0, 6.0} };
DTYPE sData2[3][2] = { {0.0, -1.0},
{1.0, 2.0},
{2.0, 1.0} };
DTYPE answer[2][2] = { {8.0, 6.0},
{17.0, 20.0} };
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * s1 = NewTensor(sOrder1, sDimSize1);
XTensor * s2 = NewTensor(sOrder2, sDimSize2);
XTensor * t = NewTensor(tOrder, tDimSize);
/* initialize variables */
s1->SetData(sData1, sUnitNum1);
s2->SetData(sData2, sUnitNum2);
t->SetZeroAll();
/* call MatrixMulBatched function */
MatrixMulBatched(s1, X_NOTRANS, s2, X_NOTRANS, t);
/* check results */
cpuTest = t->CheckData(answer, tUnitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensor */
XTensor * sGPU1 = NewTensor(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
XTensor * sGPU2 = NewTensor(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
/* Initialize variables */
sGPU1->SetData(sData1, sUnitNum1);
sGPU2->SetData(sData2, sUnitNum2);
tGPU->SetZeroAll();
/* call MatrixMulBatched function */
MatrixMulBatched(sGPU1, X_NOTRANS, sGPU2, X_NOTRANS, tGPU);
/* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum);
/* destroy variables */
delete s1;
delete s2;
delete t;
delete sGPU1;
delete sGPU2;
delete tGPU;
delete[] sDimSize1;
delete[] sDimSize2;
delete[] tDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete s1;
delete s2;
delete t;
delete[] sDimSize1;
delete[] sDimSize2;
delete[] tDimSize;
return cpuTest;
#endif // USE_CUDA
}
/* case 2: matrix multiplication of the two tensors.
* In this case, a=(2, 2, 3), b=(2, 3, 2) -> c=(2, 2, 2),
* transposedA=X_NOTRANS, transposedB=X_NOTRANS.
*/
bool TestMatrixMulBatched2()
{
/* a source tensor of size (2, 2, 3) */
int sOrder1 = 3;
int * sDimSize1 = new int[sOrder1];
sDimSize1[0] = 2;
sDimSize1[1] = 2;
sDimSize1[2] = 3;
int sUnitNum1 = 1;
for (int i = 0; i < sOrder1; i++)
sUnitNum1 *= sDimSize1[i];
/* a source tensor of size (2, 3, 2) */
int sOrder2 = 3;
int * sDimSize2 = new int[sOrder2];
sDimSize2[0] = 2;
sDimSize2[1] = 3;
sDimSize2[2] = 2;
int sUnitNum2 = 1;
for (int i = 0; i < sOrder2; i++)
sUnitNum2 *= sDimSize2[i];
/* a target tensor of size (2, 2, 2) */
int tOrder = 3;
int * tDimSize = new int[tOrder];
tDimSize[0] = 2;
tDimSize[1] = 2;
tDimSize[2] = 2;
int tUnitNum = 1;
for (int i = 0; i < tOrder; i++)
tUnitNum *= tDimSize[i];
DTYPE sData1[2][2][3] = { { {0.0, -1.0, 2.0},
{2.0, 1.0, 3.0} },
{ {1.0, 2.0, 4.0},
{3.0, 1.0, 2.0} } };
DTYPE sData2[2][3][2] = { { {1.0, 2.0},
{-4.0, 3.0},
{2.0, 6.0} },
{ {1.0, 2.0},
{3.0, 4.0},
{5.0, 6.0} } };
DTYPE answer[2][2][2] = { { {8.0, 9.0},
{4.0, 25.0} },
{ {27.0, 34.0},
{16.0, 22.0} } };
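/* worked check: the first batch alone is [[0,-1,2],[2,1,3]] x [[1,2],[-4,3],[2,6]]
   = [[8,9],[4,25]], and the second batch is multiplied independently; the
   batched call must reproduce both */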
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * s1 = NewTensor(sOrder1, sDimSize1);
XTensor * s2 = NewTensor(sOrder2, sDimSize2);
XTensor * t = NewTensor(tOrder, tDimSize);
/* initialize variables */
s1->SetData(sData1, sUnitNum1);
s2->SetData(sData2, sUnitNum2);
t->SetZeroAll();
/* call MatrixMulBatched function */
MatrixMulBatched(s1, X_NOTRANS, s2, X_NOTRANS, t);
/* check results */
cpuTest = t->CheckData(answer, tUnitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensor */
XTensor * sGPU1 = NewTensor(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
XTensor * sGPU2 = NewTensor(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
/* Initialize variables */
sGPU1->SetData(sData1, sUnitNum1);
sGPU2->SetData(sData2, sUnitNum2);
tGPU->SetZeroAll();
/* call MatrixMulBatched function */
MatrixMulBatched(sGPU1, X_NOTRANS, sGPU2, X_NOTRANS, tGPU);
/* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum);
/* destroy variables */
delete s1;
delete s2;
delete t;
delete sGPU1;
delete sGPU2;
delete tGPU;
delete[] sDimSize1;
delete[] sDimSize2;
delete[] tDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete s1;
delete s2;
delete t;
delete[] sDimSize1;
delete[] sDimSize2;
delete[] tDimSize;
return cpuTest;
#endif // USE_CUDA
}
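/* A minimal CPU reference for the batched product checked above: c_n = a_n * b_n
 * for each batch n. This is an illustrative sketch only, not part of the test
 * suite; the name NaiveMatrixMulBatched and the hard-coded (2, 2, 3) * (2, 3, 2)
 * shapes of case 2 are assumptions for illustration. */
static void NaiveMatrixMulBatched(const DTYPE a[2][2][3], const DTYPE b[2][3][2],
                                  DTYPE c[2][2][2])
{
    for (int n = 0; n < 2; n++)           /* batch index */
        for (int i = 0; i < 2; i++)       /* row of a_n */
            for (int j = 0; j < 2; j++) { /* column of b_n */
                c[n][i][j] = 0.0F;
                for (int k = 0; k < 3; k++)
                    c[n][i][j] += a[n][i][k] * b[n][k][j];
            }
}
/* e.g., feeding sData1 and sData2 of case 2 into this sketch reproduces answer:
 * c[0][0][0] = 0*1 + (-1)*(-4) + 2*2 = 8. */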
/* other cases */
/*
TODO!!
*/
/* test for MatrixMulBatched Function */
extern "C"
bool TestMatrixMulBatched()
{
XPRINT(0, stdout, "[TEST MATRIXMULBATCHED] matrix multiplication of the two tensors \n");
bool returnFlag = true, caseFlag = true;
/* case 1 test */
caseFlag = TestMatrixMulBatched1();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 1 failed!\n");
}
else
XPRINT(0, stdout, ">> case 1 passed!\n");
/* case 2 test */
caseFlag = TestMatrixMulBatched2();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 2 failed!\n");
}
else
XPRINT(0, stdout, ">> case 2 passed!\n");
/* other cases test */
/*
TODO!!
*/
if (returnFlag) {
XPRINT(0, stdout, ">> All Passed!\n");
}
else
XPRINT(0, stdout, ">> Failed!\n");
XPRINT(0, stdout, "\n");
return returnFlag;
}
} // namespace nts(NiuTrans.Tensor)
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-15
*/
#ifndef __TEST_MATRIXMULBATCHED_H__
#define __TEST_MATRIXMULBATCHED_H__
#include "../core/MatrixMulBatched.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* test for MatrixMulBatched Function */
extern "C"
bool TestMatrixMulBatched();
} // namespace nts(NiuTrans.Tensor)
#endif // __TEST_MATRIXMULBATCHED_H__
......@@ -88,21 +88,29 @@ bool TestMerge1()
gpuTest = tGPU->CheckData(answer, tUnitNum);
/* destroy variables */
delete s, t, sGPU, tGPU;
delete[] sDimSize, tDimSize;
delete s;
delete t;
delete sGPU;
delete tGPU;
delete[] sDimSize;
delete[] tDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete s, t;
delete[] sDimSize, tDimSize;
delete s;
delete t;
delete[] sDimSize;
delete[] tDimSize;
return cpuTest;
#endif // USE_CUDA
}
/* case 2: transform a tensor by merging it along a dimension.
* In this case, (2, 2, 3) -> (4, 3), whereToMerge=1, leadingDim=0.
* In this case,
(2, 2, 3) -> (4, 3), whereToMerge=1, leadingDim=0.
(2, 2, 3) -> (2, 6), whereToMerge=2, leadingDim=0.
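 * i.e., with whereToMerge=1 the two (2, 3) slices are stacked into 4 rows,
 * and with whereToMerge=2 they are placed side by side into 6 columns
 * (compare answer1 and answer2 below).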
*/
bool TestMerge2()
{
......@@ -118,40 +126,55 @@ bool TestMerge2()
sUnitNum *= sDimSize[i];
/* a target tensor of size (4, 3) */
int tOrder = 2;
int * tDimSize = new int[tOrder];
tDimSize[0] = 4;
tDimSize[1] = 3;
int tOrder1 = 2;
int * tDimSize1 = new int[tOrder1];
tDimSize1[0] = 4;
tDimSize1[1] = 3;
int tUnitNum = 1;
for (int i = 0; i < tOrder; i++)
tUnitNum *= tDimSize[i];
int tUnitNum1 = 1;
for (int i = 0; i < tOrder1; i++)
tUnitNum1 *= tDimSize1[i];
/* a target tensor of size (2, 6) */
int tOrder2 = 2;
int * tDimSize2 = new int[tOrder2];
tDimSize2[0] = 2;
tDimSize2[1] = 6;
int tUnitNum2 = 1;
for (int i = 0; i < tOrder2; i++)
tUnitNum2 *= tDimSize2[i];
DTYPE sData[2][2][3] = { { {0.0, 1.0, 2.0},
{4.0, 5.0, 6.0} },
{ {-1.0, 2.0, 3.0},
{-4.0, -5.0, -6.0} } };
DTYPE answer[4][3] = { {0.0, 1.0, 2.0},
{4.0, 5.0, 6.0},
{-1.0, 2.0, 3.0},
{-4.0, -5.0, -6.0} };
DTYPE answer1[4][3] = { {0.0, 1.0, 2.0},
{4.0, 5.0, 6.0},
{-1.0, 2.0, 3.0},
{-4.0, -5.0, -6.0} };
DTYPE answer2[2][6] = { {0.0, 1.0, 2.0, -1.0, 2.0, 3.0},
{4.0, 5.0, 6.0, -4.0, -5.0, -6.0} };
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * s = NewTensor(sOrder, sDimSize);
XTensor * t = NewTensor(tOrder, tDimSize);
XTensor * t1 = NewTensor(tOrder1, tDimSize1);
XTensor * t2 = NewTensor(tOrder2, tDimSize2);
/* initialize variables */
s->SetData(sData, sUnitNum);
t->SetZeroAll();
t1->SetZeroAll();
t2->SetZeroAll();
/* call merge function */
Merge(s, t, 1, 0);
Merge(s, t1, 1, 0);
Merge(s, t2, 2, 0);
/* check results */
cpuTest = t->CheckData(answer, tUnitNum);
cpuTest = t1->CheckData(answer1, tUnitNum1) && t2->CheckData(answer2, tUnitNum2);
#ifdef USE_CUDA
/* GPU test */
......@@ -159,121 +182,50 @@ bool TestMerge2()
/* create tensors */
XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
XTensor * tGPU1 = NewTensor(tOrder1, tDimSize1, X_FLOAT, 1.0F, 0);
XTensor * tGPU2 = NewTensor(tOrder2, tDimSize2, X_FLOAT, 1.0F, 0);
/* Initialize variables */
sGPU->SetData(sData, sUnitNum);
tGPU->SetZeroAll();
/* call merge function */
Merge(sGPU, tGPU, 1, 0);
/* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum);
/* destroy variables */
delete s, t, sGPU, tGPU;
delete[] sDimSize, tDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete s, t;
delete[] sDimSize, tDimSize;
return cpuTest;
#endif // USE_CUDA
}
/* case 3: transform a tensor by merging it along with a dimension.
* In this case, (2, 3, 4) -> (3, 8), whereToMerge=0, leadingDim=2.
*/
bool TestMerge3()
{
/* a source tensor of size (2, 3, 4) */
int sOrder = 3;
int * sDimSize = new int[sOrder];
sDimSize[0] = 2;
sDimSize[1] = 3;
sDimSize[2] = 4;
int sUnitNum = 1;
for (int i = 0; i < sOrder; i++)
sUnitNum *= sDimSize[i];
/* a target tensor of size (8, 3) */
int tOrder = 2;
int * tDimSize = new int[tOrder];
tDimSize[0] = 3;
tDimSize[1] = 8;
int tUnitNum = 1;
for (int i = 0; i < tOrder; i++)
tUnitNum *= tDimSize[i];
DTYPE sData[2][3][4] = { { {0.0, 1.0, 2.0, 3.0},
{4.0, 5.0, 6.0, 7.0},
{8.0, 9.0, 10.0, 11.0} },
{ {0.0, -1.0, -2.0, -3.0},
{-4.0, -5.0, -6.0, -7.0},
{-8.0, -9.0, -10.0, -11.0} } };
DTYPE answer[3][8] = { {0.0, 1.0, 2.0, 3.0, 0.0, -1.0, -2.0, -3.0},
{4.0, 5.0, 6.0, 7.0, -4.0, -5.0, -6.0, -7.0},
{8.0, 9.0, 10.0, 11.0, -8.0, -9.0, -10.0, -11.0} };
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * s = NewTensor(sOrder, sDimSize);
XTensor * t = NewTensor(tOrder, tDimSize);
/* initialize variables */
s->SetData(sData, sUnitNum);
t->SetZeroAll();
tGPU1->SetZeroAll();
tGPU2->SetZeroAll();
/* call merge function */
Merge(s, t, 2, 0);
Merge(sGPU, tGPU1, 1, 0);
Merge(sGPU, tGPU2, 2, 0);
/* check results */
cpuTest = t->CheckData(answer, tUnitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensor */
XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
/* Initialize variables */
sGPU->SetData(sData, sUnitNum);
tGPU->SetZeroAll();
/* call merge function */
Merge(sGPU, tGPU, 2, 0);
/* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum);
gpuTest = tGPU1->CheckData(answer1, tUnitNum1) && tGPU2->CheckData(answer2, tUnitNum2);
/* destroy variables */
delete s, t, sGPU, tGPU;
delete[] sDimSize, tDimSize;
delete s;
delete t1;
delete t2;
delete sGPU;
delete tGPU1;
delete tGPU2;
delete[] sDimSize;
delete[] tDimSize1;
delete[] tDimSize2;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete s, t;
delete[] sDimSize, tDimSize;
delete s;
delete t1;
delete t2;
delete[] sDimSize;
delete[] tDimSize1;
delete[] tDimSize2;
return cpuTest;
#endif // USE_CUDA
}
/* case 4: merge small tensors into a big tensor.
/* case 3: merge small tensors into a big tensor.
In this case, 2 * (2, 4) -> (4, 4), whereToMerge=0.
*/
bool TestMerge4()
bool TestMerge3()
{
/* create list */
XList * smallList = new XList();
......@@ -358,24 +310,36 @@ bool TestMerge4()
/* check results */
cpuTest = tGPU->CheckData(answer, tUnitNum);
delete s1, s2, t, sGPU1, sGPU2, tGPU;
delete[] sDimSize, tDimSize;
/* destroy variables */
delete s1;
delete s2;
delete t;
delete sGPU1;
delete sGPU2;
delete tGPU;
delete[] sDimSize;
delete[] tDimSize;
delete smallList;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete s1, s2, t;
delete[] sDimSize, tDimSize;
delete s1;
delete s2;
delete t;
delete[] sDimSize;
delete[] tDimSize;
delete smallList;
return cpuTest;
#endif // USE_CUDA
}
/* case 5: merge small tensors into a big tensor.
/* case 4: merge small tensors into a big tensor.
In this case, 2 * (2, 4) -> (2, 8), whereToMerge=1.
*/
bool TestMerge5()
bool TestMerge4()
{
/* create list */
XList * smallList = new XList();
......@@ -458,15 +422,27 @@ bool TestMerge5()
/* check results */
cpuTest = tGPU->CheckData(answer, tUnitNum);
delete s1, s2, t, sGPU1, sGPU2, tGPU;
delete[] sDimSize, tDimSize;
/* destroy variables */
delete s1;
delete s2;
delete t;
delete sGPU1;
delete sGPU2;
delete tGPU;
delete[] sDimSize;
delete[] tDimSize;
delete smallList;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete s1, s2, t;
delete[] sDimSize, tDimSize;
delete s1;
delete s2;
delete t;
delete[] sDimSize;
delete[] tDimSize;
delete smallList;
return cpuTest;
#endif // USE_CUDA
......@@ -481,7 +457,7 @@ bool TestMerge5()
extern "C"
bool TestMerge()
{
XPRINT(0, stdout, "[TEST MERGE] -------------\n");
XPRINT(0, stdout, "[TEST MERGE] transform a tensor by merging it alone with a dimension or merge small tensors into a big tensor\n");
bool returnFlag = true, caseFlag = true;
/* case 1 test */
......@@ -521,15 +497,6 @@ bool TestMerge()
else
XPRINT(0, stdout, ">> case 4 passed!\n");
/* case 5 test */
caseFlag = TestMerge5();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 5 failed!\n");
}
else
XPRINT(0, stdout, ">> case 5 passed!\n");
/* other cases test */
/*
TODO!!
......
......@@ -19,17 +19,16 @@
* $Created by: Lin Ye (email: linye2015@outlook.com) 2018-06-15
*/
#include "../XTensor.h"
#include "../XDevice.h"
#include "../core/MultiplyElementWise.h"
#include "TMultiplyElementWise.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1: element-wise product of two tensors, c(i) = a(i)*b(i) + \alpha * c(i)
* In this case, (2 * 1) (2 * 1) -> (2 * 1), leadingDim=0, alpha=0.
/* case 1: element-wise product of two tensors
* c(i) = a(i)*b(i) + \alpha * c(i)
* In this case, (2, 1) (2, 1) -> (2, 1), leadingDim=0, alpha=0.
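 * e.g., with a = {0.0, 1.0}, b = {2.0, 3.0} and alpha = 0,
 * c = {0.0 * 2.0, 1.0 * 3.0} = {0.0, 3.0}, which is the answer checked below.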
*/
bool TestMultiplyElementWise1()
{
/* a source tensor of size 2 * 1 */
/* a source tensor of size (2, 1) */
int sOrder1 = 2;
int * sDimSize1 = new int[sOrder1];
sDimSize1[0] = 2;
......@@ -39,7 +38,7 @@ bool TestMultiplyElementWise1()
for (int i = 0; i < sOrder1; i++)
sUnitNum1 *= sDimSize1[i];
/* a source tensor of size 2 * 1 */
/* a source tensor of size (2, 1) */
int sOrder2 = 2;
int * sDimSize2 = new int[sOrder2];
sDimSize2[0] = 2;
......@@ -49,7 +48,7 @@ bool TestMultiplyElementWise1()
for (int i = 0; i < sOrder2; i++)
sUnitNum2 *= sDimSize2[i];
/* a target tensor of size 2 * 1 */
/* a target tensor of size (2, 1) */
int tOrder = 2;
int * tDimSize = new int[tOrder];
tDimSize[0] = 2;
......@@ -59,9 +58,12 @@ bool TestMultiplyElementWise1()
for (int i = 0; i < tOrder; i++)
tUnitNum *= tDimSize[i];
DTYPE sData1[2][1] = { {0.0}, {1.0} };
DTYPE sData2[2][1] = { {2.0}, {3.0} };
DTYPE answer[2][1] = { {0.0}, {3.0} };
DTYPE sData1[2][1] = { {0.0},
{1.0} };
DTYPE sData2[2][1] = { {2.0},
{3.0} };
DTYPE answer[2][1] = { {0.0},
{3.0} };
/* CPU test */
bool cpuTest = true;
......@@ -76,7 +78,7 @@ bool TestMultiplyElementWise1()
s2->SetData(sData2, sUnitNum2);
t->SetZeroAll();
/* call multiplyelementwise function */
/* call MultiplyElementWise function */
MultiplyElementWise(s1, s2, t, 0);
/* check results */
......@@ -96,32 +98,44 @@ bool TestMultiplyElementWise1()
sGPU2->SetData(sData2, sUnitNum2);
tGPU->SetZeroAll();
/* call multiplyelementwise function */
/* call MultiplyElementWise function */
MultiplyElementWise(sGPU1, sGPU2, tGPU, 0);
/* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum);
/* destroy variables */
delete s1, s2, t, sGPU1, sGPU2, tGPU;
delete[] sDimSize1, sDimSize2, tDimSize;
delete s1;
delete s2;
delete t;
delete sGPU1;
delete sGPU2;
delete tGPU;
delete[] sDimSize1;
delete[] sDimSize2;
delete[] tDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete s1, s2, t;
delete[] sDimSize1, sDimSize2, tDimSize;
/* destroy variables */
delete s1;
delete s2;
delete t;
delete[] sDimSize1;
delete[] sDimSize2;
delete[] tDimSize;
return cpuTest;
#endif // USE_CUDA
}
/* case 2: element-wise product of two tensors, c(i) = a(i)*b(i) + \alpha * c(i)
* In this case, (2 * 2) (2 * 2) -> (2 * 2), leadingDim=0, alpha=0.
/* case 2: element-wise product of two tensors
* c(i) = a(i)*b(i) + \alpha * c(i)
* In this case, (2, 2) (2, 2) -> (2, 2), leadingDim=0, alpha=0.
*/
bool TestMultiplyElementWise2()
{
/* a source tensor of size (2 * 2) */
/* a source tensor of size (2, 2) */
int sOrder1 = 2;
int * sDimSize1 = new int[sOrder1];
sDimSize1[0] = 2;
......@@ -131,7 +145,7 @@ bool TestMultiplyElementWise2()
for (int i = 0; i < sOrder1; i++)
sUnitNum1 *= sDimSize1[i];
/* a source tensor of size (2 * 2) */
/* a source tensor of size (2, 2) */
int sOrder2 = 2;
int * sDimSize2 = new int[sOrder2];
sDimSize2[0] = 2;
......@@ -141,7 +155,7 @@ bool TestMultiplyElementWise2()
for (int i = 0; i < sOrder2; i++)
sUnitNum2 *= sDimSize2[i];
/* a target tensor of size (2 * 2) */
/* a target tensor of size (2, 2) */
int tOrder = 2;
int * tDimSize = new int[tOrder];
tDimSize[0] = 2;
......@@ -171,7 +185,7 @@ bool TestMultiplyElementWise2()
s2->SetData(sData2, sUnitNum2);
t->SetZeroAll();
/* call multiplyelementwise function */
/* call MultiplyElementWise function */
MultiplyElementWise(s1, s2, t, 0);
/* check results */
......@@ -191,32 +205,43 @@ bool TestMultiplyElementWise2()
sGPU2->SetData(sData2, sUnitNum2);
tGPU->SetZeroAll();
/* call multiplyelementwise function */
/* call MultiplyElementWise function */
MultiplyElementWise(sGPU1, sGPU2, tGPU, 0);
/* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum);
/* destroy variables */
delete s1, s2, t, sGPU1, sGPU2, tGPU;
delete[] sDimSize1, sDimSize2, tDimSize;
delete s1;
delete s2;
delete t;
delete sGPU1;
delete sGPU2;
delete tGPU;
delete[] sDimSize1;
delete[] sDimSize2;
delete[] tDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete s1, s2, t;
delete[] sDimSize1, sDimSize2, tDimSize;
/* destroy variables */
delete s1;
delete s2;
delete t;
delete[] sDimSize1;
delete[] sDimSize2;
delete[] tDimSize;
return cpuTest;
#endif // USE_CUDA
}
/* case 3: element-wise product of two tensors, c(i) = a(i)*b(i) + \alpha * c(i)
* In this case, (2 * 2) (2 * 2) -> (2 * 2), leadingDim=1, alpha=0.
* In this case, (2, 2) (2, 2) -> (2, 2), leadingDim=1, alpha=0.
*/
bool TestMultiplyElementWise3()
{
/* a source tensor of size (2 * 2) */
/* a source tensor of size (2, 2) */
int sOrder1 = 2;
int * sDimSize1 = new int[sOrder1];
sDimSize1[0] = 2;
......@@ -226,7 +251,7 @@ bool TestMultiplyElementWise3()
for (int i = 0; i < sOrder1; i++)
sUnitNum1 *= sDimSize1[i];
/* a source tensor of size (2 * 2) */
/* a source tensor of size (2, 2) */
int sOrder2 = 2;
int * sDimSize2 = new int[sOrder2];
sDimSize2[0] = 2;
......@@ -236,7 +261,7 @@ bool TestMultiplyElementWise3()
for (int i = 0; i < sOrder2; i++)
sUnitNum2 *= sDimSize2[i];
/* a target tensor of size (2 * 2) */
/* a target tensor of size (2, 2) */
int tOrder = 2;
int * tDimSize = new int[tOrder];
tDimSize[0] = 2;
......@@ -266,7 +291,7 @@ bool TestMultiplyElementWise3()
s2->SetData(sData2, sUnitNum2);
t->SetZeroAll();
/* call multiplyelementwise function */
/* call MultiplyElementWise function */
MultiplyElementWise(s1, s2, t, 1);
/* check results */
......@@ -286,21 +311,32 @@ bool TestMultiplyElementWise3()
sGPU2->SetData(sData2, sUnitNum2);
tGPU->SetZeroAll();
/* call multiplyelementwise function */
/* call MultiplyElementWise function */
MultiplyElementWise(sGPU1, sGPU2, tGPU, 1);
/* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum);
/* destroy variables */
delete s1, s2, t, sGPU1, sGPU2, tGPU;
delete[] sDimSize1, sDimSize2, tDimSize;
delete s1;
delete s2;
delete t;
delete sGPU1;
delete sGPU2;
delete tGPU;
delete[] sDimSize1;
delete[] sDimSize2;
delete[] tDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete s1, s2, t;
delete[] sDimSize1, sDimSize2, tDimSize;
/* destroy variables */
delete s1;
delete s2;
delete t;
delete[] sDimSize1;
delete[] sDimSize2;
delete[] tDimSize;
return cpuTest;
#endif // USE_CUDA
......@@ -315,7 +351,7 @@ TODO!!
extern "C"
bool TestMultiplyElementWise()
{
XPRINT(0, stdout, "[TEST MULTIPLYELEMENTWISE] -------------\n");
XPRINT(0, stdout, "[TEST MULTIPLYELEMENTWISE] element-wise product of two tensors \n");
bool returnFlag = true, caseFlag = true;
/* case 1 test */
......
......@@ -19,15 +19,13 @@
* $Created by: Lin Ye (email: linye2015@outlook.com) 2018-06-14
*/
#include "../XTensor.h"
#include "../XDevice.h"
#include "../core/Negate.h"
#include "TNegate.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1: set every entry to its minus value */
bool TestNegate1()
{
/* a tensor of size 3 * 2 */
/* a tensor of size (3, 2) */
int aOrder = 2;
int * aDimSize = new int[aOrder];
aDimSize[0] = 3;
......@@ -53,12 +51,12 @@ bool TestNegate1()
/* initialize variables */
a->SetData(aData, aUnitNum);
/* call negate function */
/* call Negate function */
Negate(a);
/* check results */
cpuTest = a->CheckData(answer, aUnitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
......@@ -69,14 +67,15 @@ bool TestNegate1()
/* Initialize variables */
aGPU->SetData(aData, aUnitNum);
/* call negate function */
/* call Negate function */
Negate(aGPU);
/* check results */
gpuTest = aGPU->CheckData(answer, aUnitNum);
/* destroy variables */
delete a, aGPU;
delete a;
delete aGPU;
delete[] aDimSize;
return cpuTest && gpuTest;
......@@ -92,7 +91,7 @@ bool TestNegate1()
/* case 2: set every entry to its minus value */
bool TestNegate2()
{
/* a tensor of size 3 * 2 */
/* a tensor of size (3, 2) */
int aOrder = 2;
int * aDimSize = new int[aOrder];
aDimSize[0] = 3;
......@@ -118,7 +117,7 @@ bool TestNegate2()
/* initialize variables */
a->SetData(aData, aUnitNum);
/* call negate function */
/* call Negate function */
Negate(a);
/* check results */
......@@ -134,14 +133,15 @@ bool TestNegate2()
/* Initialize variables */
aGPU->SetData(aData, aUnitNum);
/* call negate function */
/* call Negate function */
Negate(aGPU);
/* check results */
gpuTest = aGPU->CheckData(answer, aUnitNum);
/* destroy variables */
delete a, aGPU;
delete a;
delete aGPU;
delete[] aDimSize;
return cpuTest && gpuTest;
......@@ -163,7 +163,7 @@ TODO!!
extern "C"
bool TestNegate()
{
XPRINT(0, stdout, "[TEST NEGATE] -------------\n");
XPRINT(0, stdout, "[TEST NEGATE] set every entry to its minus value \n");
bool returnFlag = true, caseFlag = true;
/* case 1 test */
......
......@@ -19,17 +19,17 @@
* $Created by: Lin Ye (email: linye2015@outlook.com) 2018-06-20
*/
#include "../XTensor.h"
#include "../XDevice.h"
#include "../core/Normalize.h"
#include "TNormalize.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1: normalize the data with normal distribution
* In this case, dim=0.
* For an input x, y = a * (x-mean)/sqrt(variance+\epsilon) + b.
* where a and b are the scale and bias respectively,
* and \epsilon is the adjustment parameter.
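* e.g., for the entry x = 2.0 below, with mean = 1.5, variance = 1.0,
* a = 1.0, b = 0.0 and \epsilon = 0.0: y = 1.0 * (2.0 - 1.5) / sqrt(1.0) = 0.5.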
*/
bool TestNormalize1()
{
/* a source tensor of size 2 * 3 */
/* a source tensor of size (2, 3) */
int sOrder = 2;
int * sDimSize = new int[sOrder];
sDimSize[0] = 2;
......@@ -39,7 +39,7 @@ bool TestNormalize1()
for (int i = 0; i < sOrder; i++)
sUnitNum *= sDimSize[i];
/* a target tensor of size 2 * 3 */
/* a target tensor of size (2, 3) */
int tOrder = 2;
int * tDimSize = new int[tOrder];
tDimSize[0] = 2;
......@@ -49,7 +49,7 @@ bool TestNormalize1()
for (int i = 0; i < tOrder; i++)
tUnitNum *= tDimSize[i];
/* a mean tensor of size 3 */
/* a mean tensor of size (3) */
int meanOrder = 1;
int * meanDimSize = new int[meanOrder];
meanDimSize[0] = 3;
......@@ -58,7 +58,7 @@ bool TestNormalize1()
for (int i = 0; i < meanOrder; i++)
meanUnitNum *= meanDimSize[i];
/* a var tensor of size 3 */
/* a variance tensor of size (3) */
int varOrder = 1;
int * varDimSize = new int[varOrder];
varDimSize[0] = 3;
......@@ -67,7 +67,7 @@ bool TestNormalize1()
for (int i = 0; i < varOrder; i++)
varUnitNum *= varDimSize[i];
/* a a tensor of size 2 * 3 */
/* a scale tensor of size (2, 3) */
int aOrder = 2;
int * aDimSize = new int[aOrder];
aDimSize[0] = 2;
......@@ -77,7 +77,7 @@ bool TestNormalize1()
for (int i = 0; i < aOrder; i++)
aUnitNum *= aDimSize[i];
/* a b tensor of size 2 * 3 */
/* a bias tensor of size (2, 3) */
int bOrder = 2;
int * bDimSize = new int[bOrder];
bDimSize[0] = 2;
......@@ -87,41 +87,39 @@ bool TestNormalize1()
for (int i = 0; i < bOrder; i++)
bUnitNum *= bDimSize[i];
DTYPE sData[2][3] = { {0.5, -1.0, 2.0},
{3.5, -4.5, 1.0} };
DTYPE meanData[3] = {2.0, -2.75, 1.5};
DTYPE varData[3] = {4.5, 6.125, 0.5};
DTYPE aData[2][3] = { {0.0, 0.0, 0.0},
{0.0, 0.0, 0.0} };
DTYPE bData[2][3] = { {0.0, 0.0, 0.0},
{0.0, 0.0, 0.0} };
DTYPE answer[2][3] = { {0.0, 0.0, 0.0},
{0.0, 0.0, 0.0} };
DTYPE sData[2][3] = { {1.0, 2.0, 3.0},
{1.5, 2.5, 3.5} };
DTYPE meanData[3] = {1.0, 1.5, 2.0};
DTYPE varData[3] = {1.0, 1.0, 4.0};
DTYPE aData[2][3] = { {1.0, 1.0, 1.0},
{1.0, 1.0, 1.0} };
DTYPE answer[2][3] = { {0.0, 0.5, 0.5},
{0.5, 1.0, 0.75} };
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * s = NewTensor(sOrder, sDimSize);
XTensor * t = NewTensor(tOrder, tDimSize);
XTensor * mean = NewTensor(meanOrder, meanDimSize);
XTensor * var = NewTensor(varOrder, varDimSize);
XTensor * a = NewTensor(aOrder, aDimSize);
XTensor * b = NewTensor(bOrder, bDimSize);
XTensor * t = NewTensor(tOrder, tDimSize);
/* initialize variables */
s->SetData(sData, sUnitNum);
mean->SetData(meanData, meanUnitNum);
var->SetData(varData, varUnitNum);
a->SetData(aData, aUnitNum);
b->SetData(bData, bUnitNum);
b->SetZeroAll();
t->SetZeroAll();
/* call normalize function */
Normalize(s, t, 0, mean, var, a, b, 0.0);
/* check results */
cpuTest = t->CheckData(answer, tUnitNum);
cpuTest = t->CheckData(answer, tUnitNum, 1e-4, 0);
#ifdef USE_CUDA
/* GPU test */
......@@ -140,24 +138,50 @@ bool TestNormalize1()
meanGPU->SetData(meanData, meanUnitNum);
varGPU->SetData(varData, varUnitNum);
aGPU->SetData(aData, aUnitNum);
bGPU->SetData(bData, bUnitNum);
bGPU->SetZeroAll();
tGPU->SetZeroAll();
/* call normalize function */
/* call Normalize function */
Normalize(sGPU, tGPU, 0, meanGPU, varGPU, aGPU, bGPU, 0.0);
/* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum);
gpuTest = tGPU->CheckData(answer, tUnitNum, 1e-4, 0);
/* destroy variables */
delete s, t, mean, var, a, b, sGPU, tGPU, meanGPU, varGPU, aGPU, bGPU;
delete[] sDimSize, tDimSize, meanDimSize, varDimSize, aDimSize, bDimSize;
delete s;
delete t;
delete mean;
delete var;
delete a;
delete b;
delete sGPU;
delete tGPU;
delete meanGPU;
delete varGPU;
delete aGPU;
delete bGPU;
delete[] sDimSize;
delete[] tDimSize;
delete[] meanDimSize;
delete[] varDimSize;
delete[] aDimSize;
delete[] bDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete s, t, mean, var, a, b;
delete[] sDimSize, tDimSize, meanDimSize, varDimSize, aDimSize, bDimSize;
delete s;
delete t;
delete mean;
delete var;
delete a;
delete b;
delete[] sDimSize;
delete[] tDimSize;
delete[] meanDimSize;
delete[] varDimSize;
delete[] aDimSize;
delete[] bDimSize;
return cpuTest;
#endif // USE_CUDA
......@@ -172,7 +196,7 @@ TODO!!
extern "C"
bool TestNormalize()
{
XPRINT(0, stdout, "[TEST NORMALIZE] -------------\n");
XPRINT(0, stdout, "[TEST NORMALIZE] normalized the data with normal distribution \n");
bool returnFlag = true, caseFlag = true;
/* case 1 test */
......
......@@ -19,9 +19,8 @@
* $Created by: Lin Ye (email: linye2015@outlook.com) 2018-06-15
*/
#include "../XTensor.h"
#include "../XDevice.h"
#include "../core/Power.h"
#include "../XUtility.h"
#include "TPower.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1: get the power(a, p)
......@@ -29,7 +28,7 @@ namespace nts { // namespace nts(NiuTrans.Tensor)
*/
bool TestPower1()
{
/* a tensor of size 3 * 2 */
/* a tensor of size (3, 2) */
int aOrder = 2;
int * aDimSize = new int[aOrder];
aDimSize[0] = 3;
......@@ -55,11 +54,11 @@ bool TestPower1()
/* initialize variables */
a->SetData(aData, aUnitNum);
/* call power function */
/* call Power function */
Power(a, 2.0);
/* check results */
cpuTest = a->CheckData(answer, aUnitNum);
cpuTest = a->CheckData(answer, aUnitNum, 1e-4F);
#ifdef USE_CUDA
/* GPU test */
......@@ -75,10 +74,11 @@ bool TestPower1()
Power(aGPU, 2.0);
/* check results */
gpuTest = aGPU->CheckData(answer, aUnitNum, 0.0001F);
gpuTest = aGPU->CheckData(answer, aUnitNum, 1e-4F);
/* destroy variables */
delete a, aGPU;
delete a;
delete aGPU;
delete[] aDimSize;
return cpuTest && gpuTest;
......@@ -96,7 +96,7 @@ bool TestPower1()
*/
bool TestPower2()
{
/* a tensor of size 3 * 2 */
/* a tensor of size (3, 2) */
int aOrder = 2;
int * aDimSize = new int[aOrder];
aDimSize[0] = 3;
......@@ -122,11 +122,11 @@ bool TestPower2()
/* initialize variables */
a->SetData(aData, aUnitNum);
/* call power function */
/* call Power function */
Power(a, 1.0);
/* check results */
cpuTest = a->CheckData(answer, aUnitNum);
cpuTest = a->CheckData(answer, aUnitNum, 1e-4F);
#ifdef USE_CUDA
/* GPU test */
......@@ -138,14 +138,15 @@ bool TestPower2()
/* Initialize variables */
aGPU->SetData(aData, aUnitNum);
/* call power function */
/* call Power function */
Power(aGPU, 1.0);
/* check results */
gpuTest = aGPU->CheckData(answer, aUnitNum);
gpuTest = aGPU->CheckData(answer, aUnitNum, 1e-4F);
/* destroy variables */
delete a, aGPU;
delete a;
delete aGPU;
delete[] aDimSize;
return cpuTest && gpuTest;
......@@ -163,7 +164,7 @@ bool TestPower2()
*/
bool TestPower3()
{
/* a tensor of size 3 * 2 */
/* a tensor of size (3, 2) */
int aOrder = 2;
int * aDimSize = new int[aOrder];
aDimSize[0] = 3;
......@@ -189,11 +190,11 @@ bool TestPower3()
/* initialize variables */
a->SetData(aData, aUnitNum);
/* call power function */
/* call Power function */
Power(a, 0.0);
/* check results */
cpuTest = a->CheckData(answer, aUnitNum);
cpuTest = a->CheckData(answer, aUnitNum, 1e-4F);
#ifdef USE_CUDA
/* GPU test */
......@@ -205,14 +206,15 @@ bool TestPower3()
/* Initialize variables */
aGPU->SetData(aData, aUnitNum);
/* call power function */
/* call Power function */
Power(aGPU, 0.0);
/* check results */
gpuTest = aGPU->CheckData(answer, aUnitNum);
gpuTest = aGPU->CheckData(answer, aUnitNum, 1e-4F);
/* destroy variables */
delete a, aGPU;
delete a;
delete aGPU;
delete[] aDimSize;
return cpuTest && gpuTest;
......@@ -234,7 +236,7 @@ TODO!!
extern "C"
bool TestPower()
{
XPRINT(0, stdout, "[TEST POWER] -------------\n");
XPRINT(0, stdout, "[TEST POWER] get the power(a, p) \n");
bool returnFlag = true, caseFlag = true;
/* case 1 test */
......
......@@ -16,7 +16,7 @@
*/
/*
* $Created by: LI Yinqiao (email: li.yin.qiao.2012@hotmail.com) 2018-04-30
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-30
*/
#ifndef __TEST_REDUCEMAX_H__
......@@ -24,12 +24,11 @@
#include "../core/ReduceMax.h"
namespace nts { // namespace nt(NiuTrans.Tensor)
namespace nts { // namespace nts(NiuTrans.Tensor)
/* test for ReduceMax Function */
extern "C"
bool TestReduceMax();
} // namespace nt(NiuTrans.Tensor)
} // namespace nts(NiuTrans.Tensor)
#endif // __TEST_REDUCEMAX_H__
......@@ -19,106 +19,111 @@
* $Created by: LI Yinqiao (email: li.yin.qiao.2012@hotmail.com) 2018-04-30
*/
#include "../XTensor.h"
#include "../XDevice.h"
#include "../core/ReduceMean.h"
#include "../core/ReduceMax.h"
#include "../core/ReduceSum.h"
namespace nts { // namespace nt(NiuTrans.Tensor)
/* case 1 */
#include "TReduceSum.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1: sum the items along a dimension of the tensor.
* In this case,
(2, 4) -> (4), dim = 0
(2, 4) -> (2), dim = 1
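 * e.g., summing sData = {{0, 1, 2, 3}, {4, 5, 6, 7}} below over dim 0
 * gives {4, 6, 8, 10}, and over dim 1 gives {6, 22}.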
*/
bool TestReduceSum1()
{
/* a tensor of size 2 * 4 */
int order = 2;
int order_reduce = 1;
int * dimSize = new int[order];
dimSize[0] = 2;
dimSize[1] = 4;
int unitNum = 1;
for (int i = 0; i < order; i++)
unitNum *= dimSize[i];
/* a tensor of size 4 */
int * dimSize_reduce_a = new int[order_reduce];
dimSize_reduce_a[0] = 4;
int unitNum_a = 1;
for (int i = 0; i < order_reduce; i++)
unitNum_a *= dimSize_reduce_a[i];
/* a tensor of size 2 */
int * dimSize_reduce_b = new int[order_reduce];
dimSize_reduce_b[0] = 2;
int unitNum_b = 1;
for (int i = 0; i < order_reduce; i++)
unitNum_b *= dimSize_reduce_b[i];
DTYPE aData[2][4] = { { 0.0, 1.0, 2.0, 3.0 },
{ 4.0, 5.0, 6.0, 7.0 } };
DTYPE bData[2][4] = { { 1.0, -1.0, -3.0, -5.0 },
{ -7.0, -9.0, -11.0, -13.0 } };
DTYPE answer_a[4] = { 4.0, 6.0, 8.0, 10.0 };
DTYPE answer_b[2] = { -8.0, -40.0 };
/* a tensor of size (2, 4) */
int sOrder = 2;
int * sDimSize = new int[sOrder];
sDimSize[0] = 2;
sDimSize[1] = 4;
int sUnitNum = 1;
for (int i = 0; i < sOrder; i++)
sUnitNum *= sDimSize[i];
/* a tensor of size (4) */
int tOrder1 = 1;
int * tDimSize1 = new int[tOrder1];
tDimSize1[0] = 4;
int tUnitNum1 = 1;
for (int i = 0; i < tOrder1; i++)
tUnitNum1 *= tDimSize1[i];
/* a tensor of size (2) */
int tOrder2 = 1;
int * tDimSize2 = new int[tOrder2];
tDimSize2[0] = 2;
int tUnitNum2 = 1;
for (int i = 0; i < tOrder2; i++)
tUnitNum2 *= tDimSize2[i];
DTYPE sData[2][4] = { {0.0, 1.0, 2.0, 3.0},
{4.0, 5.0, 6.0, 7.0} };
DTYPE answer1[4] = {4.0, 6.0, 8.0, 10.0};
DTYPE answer2[2] = {6.0, 22.0};
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * a = NewTensor(order, dimSize);
XTensor * reduce_a = NewTensor(order_reduce, dimSize_reduce_a);
XTensor * b = NewTensor(order, dimSize);
XTensor * reduce_b = NewTensor(order_reduce, dimSize_reduce_b);
XTensor * s = NewTensor(sOrder, sDimSize);
XTensor * t1 = NewTensor(tOrder1, tDimSize1);
XTensor * t2 = NewTensor(tOrder2, tDimSize2);
/* initialize variables */
a->SetData(aData, unitNum);
b->SetData(bData, unitNum);
s->SetData(sData, sUnitNum);
t1->SetZeroAll();
t2->SetZeroAll();
/* call reduce sum function */
ReduceSum(a, reduce_a, 0);
ReduceSum(b, reduce_b, 1);
//DTYPE* reduce_a_data = (DTYPE*)reduce_a->data;
//for (int i = 0; i < unitNum_a; i++)
// printf("%f ", *reduce_a_data++);
//printf("\n");
//DTYPE* reduce_b_data = (DTYPE*)reduce_b->data;
//for (int i = 0; i < unitNum_b; i++)
// printf("%f ", *reduce_b_data++);
/* call ReduceSum function */
ReduceSum(s, t1, 0);
ReduceSum(s, t2, 1);
/* check results */
cpuTest = reduce_a->CheckData(answer_a, unitNum_a) && reduce_b->CheckData(answer_b, unitNum_b);
cpuTest = t1->CheckData(answer1, tUnitNum1) && t2->CheckData(answer2, tUnitNum2);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensor */
XTensor * aGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * reduce_aGPU = NewTensor(order_reduce, dimSize_reduce_a, X_FLOAT, 1.0F, 0);
XTensor * bGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * reduce_bGPU = NewTensor(order_reduce, dimSize_reduce_b, X_FLOAT, 1.0F, 0);
/* create tensors */
XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * tGPU1 = NewTensor(tOrder1, tDimSize1, X_FLOAT, 1.0F, 0);
XTensor * tGPU2 = NewTensor(tOrder2, tDimSize2, X_FLOAT, 1.0F, 0);
/* Initialize variables */
aGPU->SetData(aData, unitNum);
bGPU->SetData(bData, unitNum);
/* initialize variables */
sGPU->SetData(sData, sUnitNum);
tGPU1->SetZeroAll();
tGPU2->SetZeroAll();
/* call reduce sum function */
ReduceSum(aGPU, reduce_aGPU, 0);
ReduceSum(bGPU, reduce_bGPU, 1);
/* call ReduceSum function */
ReduceSum(sGPU, tGPU1, 0);
ReduceSum(sGPU, tGPU2, 1);
/* check results */
gpuTest = reduce_aGPU->CheckData(answer_a, unitNum_a) && reduce_bGPU->CheckData(answer_b, unitNum_b);
gpuTest = tGPU1->CheckData(answer1, tUnitNum1) && tGPU2->CheckData(answer2, tUnitNum2);
/* destroy variables */
delete aGPU, bGPU, reduce_aGPU, reduce_bGPU;
delete[] dimSize, dimSize_reduce_a, dimSize_reduce_b;
delete s;
delete t1;
delete t2;
delete sGPU;
delete tGPU1;
delete tGPU2;
delete[] sDimSize;
delete[] tDimSize1;
delete[] tDimSize2;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete a;
delete b;
delete s;
delete t1;
delete t2;
delete[] sDimSize;
delete[] tDimSize1;
delete[] tDimSize2;
return cpuTest;
#endif // USE_CUDA
}
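/* A minimal CPU reference for the dim = 0 reduction checked above. This is an
 * illustrative sketch only, not part of the test suite; the name
 * NaiveReduceSumDim0 and the hard-coded (2, 4) shape are assumptions. */
static void NaiveReduceSumDim0(const DTYPE a[2][4], DTYPE out[4])
{
    for (int j = 0; j < 4; j++) {
        out[j] = 0.0F;
        for (int i = 0; i < 2; i++)
            out[j] += a[i][j]; /* accumulate along the reduced dimension */
    }
}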
......@@ -127,7 +132,7 @@ bool TestReduceSumForLargescale()
{
/* a tensor of size 10000 * 500 */
int order = 2;
int order_reduce = 1;
int orderReduce = 1;
int * dimSize = new int[order];
dimSize[0] = 10000;
dimSize[1] = 500;
......@@ -136,18 +141,18 @@ bool TestReduceSumForLargescale()
for (int i = 0; i < order; i++)
unitNum *= dimSize[i];
/* a tensor of size 500 */
int * dimSize_reduce_a = new int[order_reduce];
int * dimSize_reduce_a = new int[orderReduce];
dimSize_reduce_a[0] = 500;
int unitNum_a = 1;
for (int i = 0; i < order_reduce; i++)
for (int i = 0; i < orderReduce; i++)
unitNum_a *= dimSize_reduce_a[i];
/* a tensor of size 10000 */
int * dimSize_reduce_b = new int[order_reduce];
int * dimSize_reduce_b = new int[orderReduce];
dimSize_reduce_b[0] = 10000;
int unitNum_b = 1;
for (int i = 0; i < order_reduce; i++)
for (int i = 0; i < orderReduce; i++)
unitNum_b *= dimSize_reduce_b[i];
DTYPE * data = new DTYPE[5000000];
......@@ -166,9 +171,9 @@ bool TestReduceSumForLargescale()
/* create tensors */
XTensor * a = NewTensor(order, dimSize);
XTensor * reduce_a = NewTensor(order_reduce, dimSize_reduce_a);
XTensor * reduce_a = NewTensor(orderReduce, dimSize_reduce_a);
XTensor * b = NewTensor(order, dimSize);
XTensor * reduce_b = NewTensor(order_reduce, dimSize_reduce_b);
XTensor * reduce_b = NewTensor(orderReduce, dimSize_reduce_b);
/* initialize variables */
a->SetData(data, unitNum);
......@@ -186,9 +191,9 @@ bool TestReduceSumForLargescale()
/* create tensor */
XTensor * aGPU = NewTensor(order, dimSize, X_FLOAT);
XTensor * reduce_aGPU = NewTensor(order_reduce, dimSize_reduce_a, X_FLOAT);
XTensor * reduce_aGPU = NewTensor(orderReduce, dimSize_reduce_a, X_FLOAT);
XTensor * bGPU = NewTensor(order, dimSize, X_FLOAT);
XTensor * reduce_bGPU = NewTensor(order_reduce, dimSize_reduce_b, X_FLOAT);
XTensor * reduce_bGPU = NewTensor(orderReduce, dimSize_reduce_b, X_FLOAT);
/* Initialize variables */
aGPU->SetData(data, unitNum);
......@@ -222,7 +227,7 @@ TODO!!
extern "C"
bool TestReduceSum()
{
XPRINT(0, stdout, "[TEST ReduceSum]\n");
XPRINT(0, stdout, "[TEST ReduceSum] sum the items along a dimension of the tensor.\n");
bool returnFlag = true, caseFlag = true;
/* case 1 test */
......@@ -259,4 +264,4 @@ bool TestReduceSum()
return returnFlag;
}
} // namespace nt(NiuTrans.Tensor)
} // namespace nts(NiuTrans.Tensor)
......@@ -24,13 +24,13 @@
#include "../core/ReduceSum.h"
namespace nts { // namespace nt(NiuTrans.Tensor)
namespace nts { // namespace nts(NiuTrans.Tensor)
/* test for ReduceSum Function */
extern "C"
bool TestReduceSum();
} // namespace nt(NiuTrans.Tensor)
} // namespace nts(NiuTrans.Tensor)
#endif // __TEST_REDUCESUM_H__
......
......@@ -19,33 +19,35 @@
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27
*/
#include "../XTensor.h"
#include "TReduceSumSquared.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1 */
/* case 1: squared sum of the items along a dimension of the tensor.
* For a 1-dimensional data array a, sum = \sum_i (a_i - shift)^2.
* In this case, (2, 4) -> (4), dim = 0.
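 * e.g., for column 0 below, with shift = 1.0: (0 - 1)^2 + (4 - 1)^2 = 10,
 * the first entry of answer.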
*/
bool TestReduceSumSquared1()
{
/* a input tensor of size 2 * 4 */
int inputOrder = 2;
int * inputDimSize = new int[inputOrder];
inputDimSize[0] = 2;
inputDimSize[1] = 4;
int inputUnitNum = 1;
for (int i = 0; i < inputOrder; i++)
inputUnitNum *= inputDimSize[i];
/* a output tensor of size 4 */
int outputOrder = 1;
int * outputDimSize = new int[outputOrder];
outputDimSize[0] = 4;
int outputUnitNum = 1;
for (int i = 0; i < outputOrder; i++)
outputUnitNum *= outputDimSize[i];
/* a shift tensor of size 4 */
/* an input tensor of size (2, 4) */
int sOrder = 2;
int * sDimSize = new int[sOrder];
sDimSize[0] = 2;
sDimSize[1] = 4;
int sUnitNum = 1;
for (int i = 0; i < sOrder; i++)
sUnitNum *= sDimSize[i];
/* an output tensor of size (4) */
int tOrder = 1;
int * tDimSize = new int[tOrder];
tDimSize[0] = 4;
int tUnitNum = 1;
for (int i = 0; i < tOrder; i++)
tUnitNum *= tDimSize[i];
/* a shift tensor of size (4) */
int shiftOrder = 1;
int * shiftDimSize = new int[shiftOrder];
shiftDimSize[0] = 4;
......@@ -54,8 +56,8 @@ bool TestReduceSumSquared1()
for (int i = 0; i < shiftOrder; i++)
shiftUnitNum *= shiftDimSize[i];
DTYPE inputData[2][4] = { {0.0, 1.0, 2.0, 3.0},
{4.0, 5.0, 6.0, 7.0} };
DTYPE sData[2][4] = { {0.0, 1.0, 2.0, 3.0},
{4.0, 5.0, 6.0, 7.0} };
DTYPE shiftData[4] = {1.0, -1.0, -1.0, 0.0};
DTYPE answer[4] = {10.0, 40.0, 58.0, 58.0};
......@@ -63,51 +65,164 @@ bool TestReduceSumSquared1()
bool cpuTest = true;
/* create tensors */
XTensor * input = NewTensor(inputOrder, inputDimSize);
XTensor * output = NewTensor(outputOrder, outputDimSize);
XTensor * s = NewTensor(sOrder, sDimSize);
XTensor * t = NewTensor(tOrder, tDimSize);
XTensor * shift = NewTensor(shiftOrder, shiftDimSize);
/* initialize variables */
s->SetData(sData, sUnitNum);
shift->SetData(shiftData, shiftUnitNum);
t->SetZeroAll();
/* call ReduceSumSquared function */
ReduceSumSquared(s, t, 0, shift);
/* check results */
cpuTest = t->CheckData(answer, tUnitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
XTensor * shiftGPU = NewTensor(shiftOrder, shiftDimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
sGPU->SetData(sData, sUnitNum);
shiftGPU->SetData(shiftData, shiftUnitNum);
tGPU->SetZeroAll();
/* call ReduceSumSquared function */
ReduceSumSquared(sGPU, tGPU, 0, shiftGPU);
/* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum);
/* destroy variables */
delete s;
delete t;
delete shift;
delete sGPU;
delete tGPU;
delete shiftGPU;
delete[] sDimSize;
delete[] tDimSize;
delete[] shiftDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete s;
delete t;
delete shift;
delete[] sDimSize;
delete[] tDimSize;
delete[] shiftDimSize;
return cpuTest;
#endif // USE_CUDA
}
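/* A minimal CPU reference for the shifted squared sum checked above, in the
 * (2, 4) -> (4), dim = 0 setting. This is an illustrative sketch only, not part
 * of the test suite; the name and the fixed shapes are assumptions. */
static void NaiveReduceSumSquaredDim0(const DTYPE a[2][4], const DTYPE shift[4],
                                      DTYPE out[4])
{
    for (int j = 0; j < 4; j++) {
        out[j] = 0.0F;
        for (int i = 0; i < 2; i++) {
            DTYPE d = a[i][j] - shift[j]; /* shift, then square and accumulate */
            out[j] += d * d;
        }
    }
}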
/* case 2: squared sum of the items along a dimension of the tensor.
* For a 1-dimensional data array a, sum = \sum_i (a_i - shift)^2.
* In this case, (2, 4) -> (2), dim = 1.
*/
bool TestReduceSumSquared2()
{
/* an input tensor of size (2, 4) */
int sOrder = 2;
int * sDimSize = new int[sOrder];
sDimSize[0] = 2;
sDimSize[1] = 4;
int sUnitNum = 1;
for (int i = 0; i < sOrder; i++)
sUnitNum *= sDimSize[i];
/* an output tensor of size (2) */
int tOrder = 1;
int * tDimSize = new int[tOrder];
tDimSize[0] = 2;
int tUnitNum = 1;
for (int i = 0; i < tOrder; i++)
tUnitNum *= tDimSize[i];
/* a shift tensor of size (2) */
int shiftOrder = 1;
int * shiftDimSize = new int[shiftOrder];
shiftDimSize[0] = 2;
int shiftUnitNum = 1;
for (int i = 0; i < shiftOrder; i++)
shiftUnitNum *= shiftDimSize[i];
DTYPE sData[2][4] = { {0.0, 1.0, 2.0, 3.0},
{4.0, 5.0, 6.0, 7.0} };
DTYPE shiftData[2] = {-1.0, 1.0};
DTYPE answer[2] = {30.0, 86.0};
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * s = NewTensor(sOrder, sDimSize);
XTensor * t = NewTensor(tOrder, tDimSize);
XTensor * shift = NewTensor(shiftOrder, shiftDimSize);
/* initialize variables */
input->SetData(inputData, inputUnitNum);
s->SetData(sData, sUnitNum);
shift->SetData(shiftData, shiftUnitNum);
output->SetZeroAll();
t->SetZeroAll();
/* call ReduceSumSquared function */
ReduceSumSquared(input, output, 0, shift);
ReduceSumSquared(s, t, 1, shift);
/* check results */
cpuTest = output->CheckData(answer, outputUnitNum);
cpuTest = t->CheckData(answer, tUnitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
XTensor * inputGPU = NewTensor(inputOrder, inputDimSize, X_FLOAT, 1.0F, 0);
XTensor * outputGPU = NewTensor(outputOrder, outputDimSize, X_FLOAT, 1.0F, 0);
XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
XTensor * shiftGPU = NewTensor(shiftOrder, shiftDimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
inputGPU->SetData(inputData, inputUnitNum);
sGPU->SetData(sData, sUnitNum);
shiftGPU->SetData(shiftData, shiftUnitNum);
outputGPU->SetZeroAll();
tGPU->SetZeroAll();
/* call ReduceSumSquared function */
ReduceSumSquared(inputGPU, outputGPU, 0, shiftGPU);
ReduceSumSquared(sGPU, tGPU, 1, shiftGPU);
/* check results */
gpuTest = output->CheckData(answer, outputUnitNum);
gpuTest = tGPU->CheckData(answer, tUnitNum);
/* destroy variables */
delete input, output, shift;
delete inputGPU, outputGPU, shiftGPU;
delete[] inputDimSize, outputDimSize, shiftDimSize;
delete s;
delete t;
delete shift;
delete sGPU;
delete tGPU;
delete shiftGPU;
delete[] sDimSize;
delete[] tDimSize;
delete[] shiftDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete input, output, shift;
delete[] inputDimSize, outputDimSize, shiftDimSize;
delete s;
delete t;
delete shift;
delete[] sDimSize;
delete[] tDimSize;
delete[] shiftDimSize;
return cpuTest;
#endif // USE_CUDA
......@@ -122,7 +237,7 @@ TODO!!
extern "C"
bool TestReduceSumSquared()
{
XPRINT(0, stdout, "[TEST ReduceSumSquared]\n");
XPRINT(0, stdout, "[TEST ReduceSumSquared] squared sum of the items along a dimension of the tensor\n");
bool returnFlag = true, caseFlag = true;
/* case 1 test */
......@@ -133,6 +248,15 @@ bool TestReduceSumSquared()
}
else
XPRINT(0, stdout, ">> case 1 passed!\n");
/* case 2 test */
caseFlag = TestReduceSumSquared2();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 1 failed!\n");
}
else
XPRINT(0, stdout, ">> case 1 passed!\n");
/* other cases test */
/*
......
......@@ -19,33 +19,35 @@
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27
*/
#include "../XTensor.h"
#include "TReduceVariance.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1 */
/* case 1: variance of the items along a dimension of the tensor.
* For a 1-dimensional data array a, variance = 1/n * \sum_i (a_i - mean)^2.
* In this case, (2, 4) -> (4), dim = 0.
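 * e.g., for column 0 below, with mean = 2.0:
 * ((0 - 2)^2 + (4 - 2)^2) / 2 = 4, the first entry of answer.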
*/
bool TestReduceVariance1()
{
/* a input tensor of size 2 * 4 */
int inputOrder = 2;
int * inputDimSize = new int[inputOrder];
inputDimSize[0] = 2;
inputDimSize[1] = 4;
int inputUnitNum = 1;
for (int i = 0; i < inputOrder; i++)
inputUnitNum *= inputDimSize[i];
/* a output tensor of size 1 */
int outputOrder = 1;
int * outputDimSize = new int[outputOrder];
outputDimSize[0] = 4;
int outputUnitNum = 1;
for (int i = 0; i < outputOrder; i++)
outputUnitNum *= outputDimSize[i];
/* a shift tensor of size 1 */
/* an input tensor of size (2, 4) */
int sOrder = 2;
int * sDimSize = new int[sOrder];
sDimSize[0] = 2;
sDimSize[1] = 4;
int sUnitNum = 1;
for (int i = 0; i < sOrder; i++)
sUnitNum *= sDimSize[i];
/* an output tensor of size (4) */
int tOrder = 1;
int * tDimSize = new int[tOrder];
tDimSize[0] = 4;
int tUnitNum = 1;
for (int i = 0; i < tOrder; i++)
tUnitNum *= tDimSize[i];
/* a mean tensor of size (4) */
int meanOrder = 1;
int * meanDimSize = new int[meanOrder];
meanDimSize[0] = 4;
......@@ -54,61 +56,70 @@ bool TestReduceVariance1()
for (int i = 0; i < meanOrder; i++)
meanUnitNum *= meanDimSize[i];
DTYPE inputData[2][4] = { {0.0, 1.0, 2.0, 3.0},
{4.0, 5.0, 6.0, 7.0} };
DTYPE meanData[4] = {2.0, 3.0, 4.0, 5.0};
DTYPE answer[4] = {4.0, 4.0, 4.0, 4.0};
DTYPE sData[2][4] = { {0.0F, 1.0F, 2.0F, 3.0F},
{4.0F, 5.0F, 6.0F, 7.0F} };
DTYPE meanData[4] = {2.0F, 3.0F, 4.0F, 5.0F};
DTYPE answer[4] = {4.0F, 4.0F, 4.0F, 4.0F};
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * input = NewTensor(inputOrder, inputDimSize);
XTensor * output = NewTensor(outputOrder, outputDimSize);
XTensor * s = NewTensor(sOrder, sDimSize);
XTensor * t = NewTensor(tOrder, tDimSize);
XTensor * mean = NewTensor(meanOrder, meanDimSize);
/* initialize variables */
input->SetData(inputData, inputUnitNum);
s->SetData(sData, sUnitNum);
mean->SetData(meanData, meanUnitNum);
output->SetZeroAll();
t->SetZeroAll();
/* call ReduceVariance function */
ReduceVariance(input, output, 0, mean);
ReduceVariance(s, t, 0, mean);
/* check results */
cpuTest = output->CheckData(answer, outputUnitNum);
cpuTest = t->CheckData(answer, tUnitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
XTensor * inputGPU = NewTensor(inputOrder, inputDimSize, X_FLOAT, 1.0F, 0);
XTensor * outputGPU = NewTensor(outputOrder, outputDimSize, X_FLOAT, 1.0F, 0);
XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
XTensor * meanGPU = NewTensor(meanOrder, meanDimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
inputGPU->SetData(inputData, inputUnitNum);
sGPU->SetData(sData, sUnitNum);
meanGPU->SetData(meanData, meanUnitNum);
outputGPU->SetZeroAll();
tGPU->SetZeroAll();
/* call ReduceVariance function */
ReduceVariance(inputGPU, outputGPU, 0, meanGPU);
ReduceVariance(sGPU, tGPU, 0, meanGPU);
/* check results */
gpuTest = output->CheckData(answer, outputUnitNum);
gpuTest = tGPU->CheckData(answer, tUnitNum);
/* destroy variables */
delete input, output, mean;
delete inputGPU, outputGPU, meanGPU;
delete[] inputDimSize, outputDimSize, meanDimSize;
delete s;
delete t;
delete mean;
delete sGPU;
delete tGPU;
delete meanGPU;
delete[] sDimSize;
delete[] tDimSize;
delete[] meanDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete input, output, mean;
delete[] inputDimSize, outputDimSize, meanDimSize;
delete s;
delete t;
delete mean;
delete[] sDimSize;
delete[] tDimSize;
delete[] meanDimSize;
return cpuTest;
#endif // USE_CUDA
......@@ -123,7 +134,7 @@ TODO!!
extern "C"
bool TestReduceVariance()
{
XPRINT(0, stdout, "[TEST ReduceVariance]\n");
XPRINT(0, stdout, "[TEST ReduceVariance] variance of the items along a dimension of the tensor\n");
bool returnFlag = true, caseFlag = true;
/* case 1 test */
......
......@@ -16,7 +16,7 @@
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-07-06
*/
#ifndef __TEST_REDUCEVARIANCE_H__
......
......@@ -19,27 +19,28 @@
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27
*/
#include "../XTensor.h"
#include "TScaleAndShift.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1 */
/* case 1: scale and shift all tensor entries.
* p = p * scale + shift
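 * e.g., with scale = 2.0 and shift = 0.5, the entry 3.0 becomes
 * 3.0 * 2.0 + 0.5 = 6.5 (see answer below).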
*/
bool TestScaleAndShift1()
{
/* a input tensor of size 2 * 4 */
int inputOrder = 2;
int * inputDimSize = new int[inputOrder];
inputDimSize[0] = 2;
inputDimSize[1] = 4;
/* an input tensor of size (2, 4) */
int sOrder = 2;
int * sDimSize = new int[sOrder];
sDimSize[0] = 2;
sDimSize[1] = 4;
int inputUnitNum = 1;
for (int i = 0; i < inputOrder; i++)
inputUnitNum *= inputDimSize[i];
int sUnitNum = 1;
for (int i = 0; i < sOrder; i++)
sUnitNum *= sDimSize[i];
DTYPE inputData[2][4] = { {0.0, 1.0, 2.0, 3.0},
{4.0, 5.0, 6.0, 7.0} };
DTYPE answer[2][4] = { {0.5, 2.5, 4.5, 6.5},
{8.5, 10.5, 12.5, 14.5} };
DTYPE sData[2][4] = { {0.0F, 1.0F, 2.0F, 3.0F},
{4.0F, 5.0F, 6.0F, 7.0F} };
DTYPE answer[2][4] = { {0.5F, 2.5F, 4.5F, 6.5F},
{8.5F, 10.5F, 12.5F, 14.5F} };
DTYPE scaleFactor = 2.0;
DTYPE shiftFactor = 0.5;
......@@ -48,43 +49,43 @@ bool TestScaleAndShift1()
bool cpuTest = true;
/* create tensors */
XTensor * input = NewTensor(inputOrder, inputDimSize);
XTensor * s = NewTensor(sOrder, sDimSize);
/* initialize variables */
input->SetData(inputData, inputUnitNum);
s->SetData(sData, sUnitNum);
/* call ScaleAndShift function */
ScaleAndShift(input, scaleFactor, shiftFactor);
ScaleAndShift(s, scaleFactor, shiftFactor);
/* check results */
cpuTest = input->CheckData(answer, inputUnitNum);
cpuTest = s->CheckData(answer, sUnitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
XTensor * inputGPU = NewTensor(inputOrder, inputDimSize, X_FLOAT, 1.0F, 0);
XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
inputGPU->SetData(inputData, inputUnitNum);
sGPU->SetData(sData, sUnitNum);
/* call ScaleAndShift function */
ScaleAndShift(inputGPU, scaleFactor, shiftFactor);
ScaleAndShift(sGPU, scaleFactor, shiftFactor);
/* check results */
gpuTest = inputGPU->CheckData(answer, inputUnitNum);
gpuTest = sGPU->CheckData(answer, sUnitNum);
/* destroy variables */
delete input;
delete inputGPU;
delete[] inputDimSize;
delete s;
delete sGPU;
delete[] sDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete input;
delete[] inputDimSize;
delete s;
delete[] sDimSize;
return cpuTest;
#endif // USE_CUDA
......@@ -99,7 +100,7 @@ TODO!!
extern "C"
bool TestScaleAndShift()
{
XPRINT(0, stdout, "[TEST ScaleAndShift]\n");
XPRINT(0, stdout, "[TEST ScaleAndShift] scale and shift all tensor entires\n");
bool returnFlag = true, caseFlag = true;
/* case 1 test */
......
......@@ -16,7 +16,7 @@
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-07-04
*/
#include "TSelect.h"
......@@ -25,10 +25,11 @@ namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1: test SelectRange function.
* It can generate a tensor with selected data
* in range [low, high) along the given dimension.
* In this case, (2, 2, 4) -> (2, 2, 2), dim = 2, low = 1, high = 3.
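 * i.e., indices 1 and 2 of the last dimension are kept, so the slice
 * {0, 1, 2, 3} becomes {1, 2} (compare sData and answer below).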
*/
bool TestSelect1()
{
/* a input tensor of size (2, 4) */
/* a input tensor of size (2, 2, 4) */
int sOrder = 3;
int * sDimSize = new int[sOrder];
sDimSize[0] = 2;
......@@ -39,23 +40,25 @@ bool TestSelect1()
for (int i = 0; i < sOrder; i++)
sUnitNum *= sDimSize[i];
/* a output tensor of size (2, 2) */
/* a output tensor of size (2, 2, 2) */
int tOrder = 3;
int * tDimSize = new int[tOrder];
tDimSize[0] = 2;
tDimSize[1] = 1;
tDimSize[2] = 4;
tDimSize[1] = 2;
tDimSize[2] = 2;
int tUnitNum = 1;
for (int i = 0; i < tOrder; i++)
tUnitNum *= tDimSize[i];
DTYPE sData[2][2][4] = { { {0.0, 1.0, 2.0, 3.0},
{4.0, 5.0, 6.0, 7.0} },
{ {1.0, 2.0, 3.0, 4.0},
{5.0, 6.0, 7.0, 8.0} } };
DTYPE answer[2][1][4] = { { {4.0, 5.0, 6.0, 7.0} },
{ {5.0, 6.0, 7.0, 8.0} } };
DTYPE sData[2][2][4] = { { {0.0F, 1.0F, 2.0F, 3.0F},
{4.0F, 5.0F, 6.0F, 7.0F} },
{ {1.0F, 2.0F, 3.0F, 4.0F},
{5.0F, 6.0F, 7.0F, 8.0F} } };
DTYPE answer[2][2][2] = { { {1.0F, 2.0F},
{5.0F, 6.0F} },
{ {2.0F, 3.0F},
{6.0F, 7.0F} } };
/* CPU test */
bool cpuTest = true;
......@@ -69,7 +72,7 @@ bool TestSelect1()
t->SetZeroAll();
/* call SelectRange function */
SelectRange(s, 1, 1, 2, t);
SelectRange(s, 2, 1, 3, t);
/* check results */
cpuTest = t->CheckData(answer, tUnitNum);
......@@ -121,7 +124,7 @@ TODO!!
extern "C"
bool TestSelect()
{
XPRINT(0, stdout, "[TEST Select] scale and shift all tensor entires\n");
XPRINT(0, stdout, "[TEST Select] generate a tensor with seleccted data in range[low,high] along the given dimension \n");
bool returnFlag = true, caseFlag = true;
/* case 1 test */
......