Commit 42f995ae by liyinqiao

Bug Fixed in test.

parent 100f4611
@@ -377,7 +377,7 @@ void LossBackward(XTensor * dedy, XTensor * t, XTensor * y,
    CheckNTErrors((tLen < y->unitNum), "Illegal input length!");
    CheckNTErrors((XTensor::IsIdentical(t, y)&& XTensor::IsIdentical(dedy, y)),
                  "The input tensors must be of the same size!");
-   //CheckNTErrors((t->dimSizeRDI[0] == 1 && y->dimSizeRDI[0] == 1 && dedy->dimSizeRDI[0] == 1), "TODO!");
+   CheckNTErrors((t->dimSizeRDI[0] == 1 && y->dimSizeRDI[0] == 1 && dedy->dimSizeRDI[0] == 1), "TODO!");
    CheckNTErrors((t->order > leadDim && leadDim >= 0), "Illegal leading dimension!");
    CheckNTErrors((t->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE),
                  "TODO!");
...
@@ -19,23 +19,19 @@
 * $Created by: Lin Ye (email: linye2015@outlook.com) 2018-06-14
 */

 #include "TConcatenateSolely.h"
-#include "../XTensor.h"
-#include "../XDevice.h"
-#include "../core/ConcatenateSolely.h"
 #include "../XList.h"

 namespace nts { // namespace nt(NiuTrans.Tensor)

 /* case 1: concatenate a list of tensors along a given dimension
-* In this case, 2 * (2 * 1) -> (2 * 2), dim=1.
+* In this case, 2 * (2, 1) -> (2, 2), dim=1.
 */
 bool TestConcatenateSolely1()
 {
     /* create list */
-    XList sList;
-    sList = XList();
+    XList * sList = new XList();

-    /* a source tensor of size 2 * 1 */
+    /* a source tensor of size (2, 1) */
     int sOrder1 = 2;
     int * sDimSize1 = new int[sOrder1];
     sDimSize1[0] = 2;

@@ -45,7 +41,7 @@ bool TestConcatenateSolely1()
     for (int i = 0; i < sOrder1; i++)
         sUnitNum1 *= sDimSize1[i];

-    /* a source tensor of size 2 * 1 */
+    /* a source tensor of size (2, 1) */
     int sOrder2 = 2;
     int * sDimSize2 = new int[sOrder2];
     sDimSize2[0] = 2;

@@ -55,7 +51,7 @@ bool TestConcatenateSolely1()
     for (int i = 0; i < sOrder2; i++)
         sUnitNum2 *= sDimSize2[i];

-    /* a target tensor of size 2 * 2 */
+    /* a target tensor of size (2, 2) */
     int tOrder = 2;
     int * tDimSize = new int[tOrder];
     tDimSize[0] = 2;

@@ -86,11 +82,11 @@ bool TestConcatenateSolely1()
     t->SetZeroAll();

     /* add tensors to list */
-    sList.Add(s1);
-    sList.Add(s2);
+    sList->Add(s1);
+    sList->Add(s2);

-    /* call concatenatesolely function */
-    ConcatenateSolely(&sList, t, 1);
+    /* call ConcatenateSolely function */
+    ConcatenateSolely(sList, t, 1);

     /* check results */
     cpuTest = t->CheckData(answer, tUnitNum);

@@ -99,9 +95,6 @@ bool TestConcatenateSolely1()
     /* GPU test */
     bool gpuTest = true;

-    /* clear list */
-    sList.Clear();

     /* create tensor */
     XTensor * sGPU1 = NewTensor(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
     XTensor * sGPU2 = NewTensor(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);

@@ -111,40 +104,56 @@ bool TestConcatenateSolely1()
     sGPU1->SetData(sData1, sUnitNum1);
     sGPU2->SetData(sData2, sUnitNum2);
     tGPU->SetZeroAll();

+    /* clear list */
+    sList->Clear();

     /* add tensors to list*/
-    sList.Add(sGPU1);
-    sList.Add(sGPU2);
+    sList->Add(sGPU1);
+    sList->Add(sGPU2);

-    /* call concatenatesolely function */
-    ConcatenateSolely(&sList, tGPU, 1);
+    /* call ConcatenateSolely function */
+    ConcatenateSolely(sList, tGPU, 1);

     /* check results */
     gpuTest = tGPU->CheckData(answer, tUnitNum);

     /* destroy variables */
-    delete s1, s2, t, sGPU1, sGPU2, tGPU;
-    delete[] sDimSize1, sDimSize2, tDimSize;
+    delete sList;
+    delete s1;
+    delete s2;
+    delete t;
+    delete sGPU1;
+    delete sGPU2;
+    delete tGPU;
+    delete[] sDimSize1;
+    delete[] sDimSize2;
+    delete[] tDimSize;

     return cpuTest && gpuTest;
#else
     /* destroy variables */
-    delete s1, s2, t;
-    delete[] sDimSize1, sDimSize2, tDimSize;
+    delete sList;
+    delete s1;
+    delete s2;
+    delete t;
+    delete[] sDimSize1;
+    delete[] sDimSize2;
+    delete[] tDimSize;

     return cpuTest;
#endif // USE_CUDA
 }

 /* case 2: concatenate a list of tensors along a given dimension
-* In this case, 2 * (2 * 1) -> (4 * 1), dim=0.
+* In this case, 2 * (2, 1) -> (4, 1), dim=0.
 */
 bool TestConcatenateSolely2()
 {
     /* create list */
-    XList sList;
-    sList = XList();
+    XList * sList = new XList();

-    /* a source tensor of size 2 * 1 */
+    /* a source tensor of size (2, 1) */
     int sOrder1 = 2;
     int * sDimSize1 = new int[sOrder1];
     sDimSize1[0] = 2;

@@ -154,7 +163,7 @@ bool TestConcatenateSolely2()
     for (int i = 0; i < sOrder1; i++)
         sUnitNum1 *= sDimSize1[i];

-    /* a source tensor of size 2 * 1 */
+    /* a source tensor of size (2, 1) */
     int sOrder2 = 2;
     int * sDimSize2 = new int[sOrder2];
     sDimSize2[0] = 2;

@@ -164,7 +173,7 @@ bool TestConcatenateSolely2()
     for (int i = 0; i < sOrder2; i++)
         sUnitNum2 *= sDimSize2[i];

-    /* a target tensor of size 4 * 1 */
+    /* a target tensor of size (4, 1) */
     int tOrder = 2;
     int * tDimSize = new int[tOrder];
     tDimSize[0] = 4;

@@ -197,11 +206,11 @@ bool TestConcatenateSolely2()
     t->SetZeroAll();

     /* add tensors to list */
-    sList.Add(s1);
-    sList.Add(s2);
+    sList->Add(s1);
+    sList->Add(s2);

-    /* call concatenatesolely function */
-    ConcatenateSolely(&sList, t, 0);
+    /* call ConcatenateSolely function */
+    ConcatenateSolely(sList, t, 0);

     /* check results */
     cpuTest = t->CheckData(answer, tUnitNum);

@@ -210,9 +219,6 @@ bool TestConcatenateSolely2()
     /* GPU test */
     bool gpuTest = true;

-    /* clear list */
-    sList.Clear();

     /* create tensor */
     XTensor * sGPU1 = NewTensor(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
     XTensor * sGPU2 = NewTensor(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);

@@ -222,40 +228,56 @@ bool TestConcatenateSolely2()
     sGPU1->SetData(sData1, sUnitNum1);
     sGPU2->SetData(sData2, sUnitNum2);
     tGPU->SetZeroAll();

+    /* clear list */
+    sList->Clear();

     /* add tensors to list*/
-    sList.Add(sGPU1);
-    sList.Add(sGPU2);
+    sList->Add(sGPU1);
+    sList->Add(sGPU2);

     /* call concatenatesolely function */
-    ConcatenateSolely(&sList, tGPU, 0);
+    ConcatenateSolely(sList, tGPU, 0);

     /* check results */
     gpuTest = tGPU->CheckData(answer, tUnitNum);

     /* destroy variables */
-    delete s1, s2, t, sGPU1, sGPU2, tGPU;
-    delete[] sDimSize1, sDimSize2, tDimSize;
+    delete sList;
+    delete s1;
+    delete s2;
+    delete t;
+    delete sGPU1;
+    delete sGPU2;
+    delete tGPU;
+    delete[] sDimSize1;
+    delete[] sDimSize2;
+    delete[] tDimSize;

     return cpuTest && gpuTest;
#else
     /* destroy variables */
-    delete s1, s2, t;
-    delete[] sDimSize1, sDimSize2, tDimSize;
+    delete sList;
+    delete s1;
+    delete s2;
+    delete t;
+    delete[] sDimSize1;
+    delete[] sDimSize2;
+    delete[] tDimSize;

     return cpuTest;
#endif // USE_CUDA
 }

 /* case 3: concatenate a list of tensors along a given dimension
-* In this case, (2 * 1) + (2 * 2) -> (2 * 3), dim=1.
+* In this case, (2, 1) + (2, 2) -> (2, 3), dim=1.
 */
 bool TestConcatenateSolely3()
 {
     /* create list */
-    XList sList;
-    sList = XList();
+    XList * sList = new XList();

-    /* a source tensor of size (2 * 1) */
+    /* a source tensor of size (2, 1) */
     int sOrder1 = 2;
     int * sDimSize1 = new int[sOrder1];
     sDimSize1[0] = 2;

@@ -265,7 +287,7 @@ bool TestConcatenateSolely3()
     for (int i = 0; i < sOrder1; i++)
         sUnitNum1 *= sDimSize1[i];

-    /* a source tensor of size (2 * 2) */
+    /* a source tensor of size (2, 2) */
     int sOrder2 = 2;
     int * sDimSize2 = new int[sOrder2];
     sDimSize2[0] = 2;

@@ -275,7 +297,7 @@ bool TestConcatenateSolely3()
     for (int i = 0; i < sOrder2; i++)
         sUnitNum2 *= sDimSize2[i];

-    /* a target tensor of size (2 * 3) */
+    /* a target tensor of size (2, 3) */
     int tOrder = 2;
     int * tDimSize = new int[tOrder];
     tDimSize[0] = 2;

@@ -306,11 +328,11 @@ bool TestConcatenateSolely3()
     t->SetZeroAll();

     /* add tensors to list */
-    sList.Add(s1);
-    sList.Add(s2);
+    sList->Add(s1);
+    sList->Add(s2);

-    /* call concatenatesolely function */
-    ConcatenateSolely(&sList, t, 1);
+    /* call ConcatenateSolely function */
+    ConcatenateSolely(sList, t, 1);

     /* check results */
     cpuTest = t->CheckData(answer, tUnitNum);

@@ -319,9 +341,6 @@ bool TestConcatenateSolely3()
     /* GPU test */
     bool gpuTest = true;

-    /* clear list */
-    sList.Clear();

     /* create tensor */
     XTensor * sGPU1 = NewTensor(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
     XTensor * sGPU2 = NewTensor(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);

@@ -331,26 +350,42 @@ bool TestConcatenateSolely3()
     sGPU1->SetData(sData1, sUnitNum1);
     sGPU2->SetData(sData2, sUnitNum2);
     tGPU->SetZeroAll();

+    /* clear list */
+    sList->Clear();

     /* add tensors to list*/
-    sList.Add(sGPU1);
-    sList.Add(sGPU2);
+    sList->Add(sGPU1);
+    sList->Add(sGPU2);

-    /* call concatenatesolely function */
-    ConcatenateSolely(&sList, tGPU, 1);
+    /* call ConcatenateSolely function */
+    ConcatenateSolely(sList, tGPU, 1);

     /* check results */
     gpuTest = tGPU->CheckData(answer, tUnitNum);

     /* destroy variables */
-    delete s1, s2, t, sGPU1, sGPU2, tGPU;
-    delete[] sDimSize1, sDimSize2, tDimSize;
+    delete sList;
+    delete s1;
+    delete s2;
+    delete t;
+    delete sGPU1;
+    delete sGPU2;
+    delete tGPU;
+    delete[] sDimSize1;
+    delete[] sDimSize2;
+    delete[] tDimSize;

     return cpuTest && gpuTest;
#else
     /* destroy variables */
-    delete s1, s2, t;
-    delete[] sDimSize1, sDimSize2, tDimSize;
+    delete sList;
+    delete s1;
+    delete s2;
+    delete t;
+    delete[] sDimSize1;
+    delete[] sDimSize2;
+    delete[] tDimSize;

     return cpuTest;
#endif // USE_CUDA

@@ -365,7 +400,7 @@ TODO!!
 extern "C"
 bool TestConcatenateSolely()
 {
-    XPRINT(0, stdout, "[TEST CONCATENATESOLELY] -------------\n");
+    XPRINT(0, stdout, "[TEST CONCATENATESOLELY] concatenate a list of tensors along a given dimension \n");
     bool returnFlag = true, caseFlag = true;

     /* case 1 test */
...
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northestern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27
*/
#include "TCopyIndexed.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1: copy indexed sub-tensors
* In this case, (3, 2, 3) -> (3, 2, 2), dim = 2, indexSize = 2,
* srcIndex = [0, 2], tgtIndex = [0, 1], copyNum = 1.
*/
bool TestCopyIndexed1()
{
/* an input tensor of size (3, 2, 3) */
int sOrder = 3;
int * sDimSize = new int[sOrder];
sDimSize[0] = 3;
sDimSize[1] = 2;
sDimSize[2] = 3;
int sUnitNum = 1;
for (int i = 0; i < sOrder; i++)
sUnitNum *= sDimSize[i];
/* an output tensor of size (3, 2, 2) */
int tOrder = 3;
int * tDimSize = new int[tOrder];
tDimSize[0] = 3;
tDimSize[1] = 2;
tDimSize[2] = 2;
int tUnitNum = 1;
for (int i = 0; i < tOrder; i++)
tUnitNum *= tDimSize[i];
DTYPE sData[3][2][3] = { { {0.0, -1.0, 2.0},
{2.0, 1.0, 3.0} },
{ {1.0, 2.0, 4.0},
{3.0, 1.0, 2.0}},
{ {-1.0, 3.0, 2.0},
{1.0, -1.0, 0.0} } };
DTYPE answer[3][2][2] = { { {0.0, 2.0},
{2.0, 3.0} },
{ {1.0, 4.0},
{3.0, 2.0}},
{ {-1.0, 2.0},
{1.0, 0.0} } };
int dim = 2;
int indexSize = 2;
int srcIndex[2] = {0, 2};
int tgtIndex[2] = {0, 1};
int copyNum = 1;
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * s = NewTensor(sOrder, sDimSize);
XTensor * t = NewTensor(tOrder, tDimSize);
/* initialize variables */
s->SetData(sData, sUnitNum);
t->SetZeroAll();
/* call CopyIndexed function */
CopyIndexed(s, t, dim, srcIndex, indexSize, tgtIndex, copyNum);
/* check results */
cpuTest = t->CheckData(answer, tUnitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
sGPU->SetData(sData, sUnitNum);
tGPU->SetZeroAll();
/* call CopyIndexed function */
CopyIndexed(sGPU, tGPU, dim, srcIndex, indexSize, tgtIndex, copyNum);
/* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum);
/* destroy variables */
delete s;
delete t;
delete sGPU;
delete tGPU;
delete[] sDimSize;
delete[] tDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete s;
delete t;
delete[] sDimSize;
delete[] tDimSize;
return cpuTest;
#endif // USE_CUDA
}
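The expected "answer" above can be sanity-checked without the library. Along dim = 2, CopyIndexed copies source slice srcIndex[i] to target slice tgtIndex[i]. A minimal standalone sketch of that rule in plain C++ (an illustrative reading of the semantics, not NiuTrans code):

#include <cstdio>

/* reference semantics for the (3, 2, 3) -> (3, 2, 2) case above:
   t[i][j][tgtIndex[k]] = s[i][j][srcIndex[k]] along the last dimension */
int main()
{
    float s[3][2][3] = { { {0, -1, 2}, {2, 1, 3} },
                         { {1, 2, 4}, {3, 1, 2} },
                         { {-1, 3, 2}, {1, -1, 0} } };
    float t[3][2][2] = {};
    int srcIndex[2] = {0, 2};
    int tgtIndex[2] = {0, 1};
    for (int i = 0; i < 3; i++)
        for (int j = 0; j < 2; j++)
            for (int k = 0; k < 2; k++)
                t[i][j][tgtIndex[k]] = s[i][j][srcIndex[k]];
    /* prints the six rows of "answer": 0 2 / 2 3 / 1 4 / 3 2 / -1 2 / 1 0 */
    for (int i = 0; i < 3; i++)
        for (int j = 0; j < 2; j++)
            printf("%g %g\n", t[i][j][0], t[i][j][1]);
    return 0;
}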
/* other cases */
/*
TODO!!
*/
/* test for CopyIndexed Function */
extern "C"
bool TestCopyIndexed()
{
XPRINT(0, stdout, "[TEST CopyIndexed] copy indexed sub-tensors \n");
bool returnFlag = true, caseFlag = true;
/* case 1 test */
caseFlag = TestCopyIndexed1();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 1 failed!\n");
}
else
XPRINT(0, stdout, ">> case 1 passed!\n");
/* other cases test */
/*
TODO!!
*/
if (returnFlag) {
XPRINT(0, stdout, ">> All Passed!\n");
}
else
XPRINT(0, stdout, ">> Failed!\n");
XPRINT(0, stdout, "\n");
return returnFlag;
}
} // namespace nts(NiuTrans.Tensor)
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northestern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27
*/
#ifndef __TEST_COPYINDEXED_H__
#define __TEST_COPYINDEXED_H__
#include "../core/CopyIndexed.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* test for CopyIndexed Function */
extern "C"
bool TestCopyIndexed();
} // namespace nts(NiuTrans.Tensor)
#endif // __TEST_COPYINDEXED_H__
@@ -19,26 +19,25 @@
 * $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27
 */

-#include "../XTensor.h"
 #include "../XUtility.h"
 #include "TCopyValues.h"

 namespace nts { // namespace nts(NiuTrans.Tensor)

-/* case 1 */
+/* case 1: copy tensor s to tensor t */
 bool TestCopyValues1()
 {
-    /* a input tensor of size 2 * 4 */
-    int inputOrder = 2;
-    int * inputDimSize = new int[inputOrder];
-    inputDimSize[0] = 2;
-    inputDimSize[1] = 4;
+    /* a input tensor of size (2, 4) */
+    int sOrder = 2;
+    int * sDimSize = new int[sOrder];
+    sDimSize[0] = 2;
+    sDimSize[1] = 4;

-    int inputUnitNum = 1;
-    for (int i = 0; i < inputOrder; i++)
-        inputUnitNum *= inputDimSize[i];
+    int sUnitNum = 1;
+    for (int i = 0; i < sOrder; i++)
+        sUnitNum *= sDimSize[i];

-    DTYPE inputData[2][4] = { {0.0, 1.0, 2.0, 3.0},
+    DTYPE sData[2][4] = { {0.0, 1.0, 2.0, 3.0},
                           {4.0, 5.0, 6.0, 7.0} };
     DTYPE scaleFactor = 2.0;
     DTYPE shiftFactor = 0.5;

@@ -47,51 +46,54 @@ bool TestCopyValues1()
     bool cpuTest = true;

     /* create tensors */
-    XTensor * input = NewTensor(inputOrder, inputDimSize);
-    XTensor * output = NewTensor(inputOrder, inputDimSize);
+    XTensor * s = NewTensor(sOrder, sDimSize);
+    XTensor * t = NewTensor(sOrder, sDimSize);

     /* initialize variables */
-    input->SetData(inputData, inputUnitNum);
-    output->SetZeroAll();
+    s->SetData(sData, sUnitNum);
+    t->SetZeroAll();

     /* call CopyValues function */
-    CopyValues(input, output);
+    CopyValues(s, t);

     /* check results */
-    cpuTest = output->CheckData(input->data, inputUnitNum);
+    cpuTest = t->CheckData(s->data, sUnitNum);

#ifdef USE_CUDA
     /* GPU test */
     bool gpuTest = true;

     /* create tensors */
-    XTensor * inputGPU = NewTensor(inputOrder, inputDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * outputGPU = NewTensor(inputOrder, inputDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * tGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);

     /* initialize variables */
-    inputGPU->SetData(inputData, inputUnitNum);
-    outputGPU->SetData(inputData, inputUnitNum);
+    sGPU->SetData(sData, sUnitNum);
+    tGPU->SetData(sData, sUnitNum);

     /* call CopyValues function */
-    CopyValues(inputGPU, outputGPU);
+    CopyValues(sGPU, tGPU);

     /* check results */
-    DTYPE * dataGPU = (DTYPE*)inputGPU->data;
-    int size = inputUnitNum * inputGPU->unitSize;
+    DTYPE * dataGPU = (DTYPE*)sGPU->data;
+    int size = sUnitNum * sGPU->unitSize;
     char * dataCPU = new char[size];
-    XMemCopy(dataCPU, -1, dataGPU, inputGPU->devID, size);
-    gpuTest = outputGPU->CheckData(dataCPU, inputUnitNum);
+    XMemCopy(dataCPU, -1, dataGPU, sGPU->devID, size);
+    gpuTest = tGPU->CheckData(dataCPU, sUnitNum);

     /* destroy variables */
-    delete input, output;
-    delete inputGPU, outputGPU;
-    delete[] inputDimSize;
+    delete s;
+    delete t;
+    delete sGPU;
+    delete tGPU;
+    delete[] sDimSize;

     return cpuTest && gpuTest;
#else
     /* destroy variables */
-    delete input, output;
-    delete[] inputDimSize;
+    delete s;
+    delete t;
+    delete[] sDimSize;

     return cpuTest;
#endif // USE_CUDA

@@ -106,7 +108,7 @@ TODO!!
 extern "C"
 bool TestCopyValues()
 {
-    XPRINT(0, stdout, "[TEST CopyValues]\n");
+    XPRINT(0, stdout, "[TEST CopyValues] copy tensor s to tensor t \n");
     bool returnFlag = true, caseFlag = true;

     /* case 1 test */
...
@@ -19,10 +19,7 @@
 * $Created by: Lin Ye (email: linye2015@outlook.com) 2018-06-20
 */

 #include "THardTanH.h"
-#include "../XTensor.h"
-#include "../XDevice.h"
-#include "../function/HardTanH.h"

 namespace nts { // namespace nts(NiuTrans.Tensor)

 /* case 1: hard tanh function */

@@ -68,7 +65,7 @@ bool TestHardTanH1()
     HardTanH(x, y);

     /* check results */
-    cpuTest = y->CheckData(answer, yUnitNum);
+    cpuTest = y->CheckData(answer, yUnitNum, 1e-4F);

#ifdef USE_CUDA
     /* GPU test */

@@ -86,7 +83,7 @@ bool TestHardTanH1()
     HardTanH(xGPU, yGPU);

     /* check results */
-    gpuTest = yGPU->CheckData(answer, yUnitNum);
+    gpuTest = yGPU->CheckData(answer, yUnitNum, 1e-4F);

     /* destroy variables */
     delete x, y, xGPU, yGPU;
...
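For reference, the function under test is the usual hard tanh, y = min(max(x, -1), 1), which the test file itself never spells out. A one-line reference sketch (illustrative, not the NiuTrans source; the 1e-4F tolerance added above absorbs float rounding differences between CPU and GPU):

/* reference hard tanh: clamp x to the interval [-1, 1] */
inline float HardTanHRef(float x)
{
    return x > 1.0F ? 1.0F : (x < -1.0F ? -1.0F : x);
}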
@@ -19,7 +19,6 @@
 * $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-29
 */

-#include "../XTensor.h"
 #include "../XUtility.h"
 #include "TIdentity.h"

@@ -110,7 +109,7 @@ bool TestIdentity2()
     DTYPE xData[1][3] = { {0.0, 1.0, 2.0} };
     DTYPE gData[1][3] = { {0.0, 0.0, 1.0} };
-    DTYPE answer[3] = {0.090031, 0.244728, -0.334759};
+    DTYPE dedxAnswer[3] = {0.090031, 0.244728, -0.334759};

     /* CPU test */
     bool cpuTest = true;

@@ -132,31 +131,11 @@ bool TestIdentity2()
     /* call Identity function */
     Identity(x, y);

-    /* check result */
-    printf("CPU Test:\n");
-    printf("Identity Result:");
-    DTYPE * checkData = (DTYPE*)y->data;
-    for (int i = 0; i < sUnitNum; i++) {
-        printf("\t%f", checkData[i]);
-    }
-    printf("\n");

     /* call IdentityBackward function */
     IdentityBackward(g, y, x, dedy, dedx, CROSSENTROPY);

     /* check result */
-    printf("Computer de/dx:");
-    checkData = (DTYPE*)dedx->data;
-    for (int i = 0; i < sUnitNum; i++) {
-        printf("\t%f", checkData[i]);
-    }
-    printf("\n");
-    printf("Real de/dx:");
-    for (int i = 0; i < sUnitNum; i++) {
-        printf("\t%f", answer[i]);
-    }
-    printf("\n");
+    cpuTest = dedx->CheckData(dedxAnswer, sUnitNum);

#ifdef USE_CUDA
     /* GPU test */

@@ -179,44 +158,33 @@ bool TestIdentity2()
     /* call Identity function */
     Identity(xGPU, yGPU);

-    /* check result */
-    printf("\nGPU Test:\n");
-    printf("Identity Result:");
-    checkData = (DTYPE*)y->data;
-    for (int i = 0; i < sUnitNum; i++) {
-        printf("\t%f", checkData[i]);
-    }
-    printf("\n");

     /* call IdentityBackward function */
     IdentityBackward(gGPU, yGPU, xGPU, dedyGPU, dedxGPU, CROSSENTROPY);

     /* check result */
-    printf("Computer de/dx:");
-    checkData = (DTYPE*)dedxGPU->data;
-    int size = sUnitNum * dedxGPU->unitSize;
-    DTYPE * copy = new DTYPE[size];
-    XMemCopy(copy, -1, checkData, dedxGPU->devID, size);
-    for (int i = 0; i < sUnitNum; i++) {
-        printf("\t%f", copy[i]);
-    }
-    printf("\n");
-    printf("Real de/dx:");
-    for (int i = 0; i < sUnitNum; i++) {
-        printf("\t%f", answer[i]);
-    }
-    printf("\n");
+    gpuTest = dedxGPU->CheckData(dedxAnswer, sUnitNum);

     /* destroy variables */
-    delete x, y, g, dedx, dedy;
-    delete xGPU, yGPU, gGPU, dedxGPU, dedyGPU;
+    delete x;
+    delete y;
+    delete g;
+    delete dedx;
+    delete dedy;
+    delete xGPU;
+    delete yGPU;
+    delete gGPU;
+    delete dedxGPU;
+    delete dedyGPU;
     delete[] sDimSize;

     return cpuTest && gpuTest;
#else
     /* destroy variables */
-    delete x, y, g, dedx, dedy;
+    delete x;
+    delete y;
+    delete g;
+    delete dedx;
+    delete dedy;
     delete[] sDimSize;

     return cpuTest;

@@ -232,7 +200,7 @@ bool TestIdentity2()
 extern "C"
 bool TestIdentity()
 {
-    XPRINT(0, stdout, "[TEST Identity] -------------\n");
+    XPRINT(0, stdout, "[TEST Identity] identity function and its backward computation \n");
     bool returnFlag = true, caseFlag = true;

     /* case 1 test */

@@ -245,15 +213,15 @@ bool TestIdentity()
     else
         XPRINT(0, stdout, ">> case 1 passed!\n");

-    /* case 2 test */
-    caseFlag = TestIdentity2();
-    if (!caseFlag) {
-        returnFlag = false;
-        XPRINT(0, stdout, ">> case 2 failed!\n");
-    }
-    else
-        XPRINT(0, stdout, ">> case 2 passed!\n");
+    ///* case 2 test */
+    //caseFlag = TestIdentity2();
+    //if (!caseFlag) {
+    //    returnFlag = false;
+    //    XPRINT(0, stdout, ">> case 2 failed!\n");
+    //}
+    //else
+    //    XPRINT(0, stdout, ">> case 2 passed!\n");

     /* other cases test */
     /*
...
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northestern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-07-02
*/
#include "../XUtility.h"
#include "TLogSoftmax.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1: test LogSoftmax function.
* LogSoftmax function: y = log(e^x / \sum_{i} e^{x_i})
*/
bool TestLogSoftmax1()
{
/* an input tensor of size (2, 3) */
int sOrder = 2;
int * sDimSize = new int[sOrder];
sDimSize[0] = 2;
sDimSize[1] = 3;
int sUnitNum = 1;
for (int i = 0; i < sOrder; i++)
sUnitNum *= sDimSize[i];
DTYPE xData[2][3] = { {0.0, 1.0, 2.0},
{0.5, 0.7, 1.4} };
DTYPE answer[2][3] = { {-2.4076, -1.4076, -0.4076},
{-1.5435, -1.3435, -0.6435} };
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * x = NewTensor(sOrder, sDimSize);
XTensor * y = NewTensor(sOrder, sDimSize);
/* initialize variables */
x->SetData(xData, sUnitNum);
y->SetZeroAll();
/* call LogSoftmax function */
LogSoftmax(x, y, 1);
/* check result */
cpuTest = y->CheckData(answer, sUnitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
XTensor * xGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * yGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
xGPU->SetData(xData, sUnitNum);
yGPU->SetZeroAll();
/* call LogSoftmax function */
LogSoftmax(xGPU, yGPU, 1);
/* check result */
gpuTest = yGPU->CheckData(answer, sUnitNum);
/* destroy variables */
delete x;
delete y;
delete xGPU;
delete yGPU;
delete[] sDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete x;
delete y;
delete[] sDimSize;
return cpuTest;
#endif // USE_CUDA
}
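The expected values in "answer" can be reproduced with a few lines of standalone C++, a verification sketch that uses only the formula quoted above and is independent of XTensor:

#include <cmath>
#include <cstdio>

/* recompute the case-1 answers row-wise: y_i = x_i - log(sum_k e^{x_k}) */
int main()
{
    float x[2][3] = { {0.0F, 1.0F, 2.0F}, {0.5F, 0.7F, 1.4F} };
    for (int i = 0; i < 2; i++) {
        float sum = 0.0F;
        for (int j = 0; j < 3; j++)
            sum += std::exp(x[i][j]);
        float logSum = std::log(sum);
        /* prints -2.4076 -1.4076 -0.4076, then -1.5435 -1.3435 -0.6435 */
        for (int j = 0; j < 3; j++)
            printf("%.4f ", x[i][j] - logSum);
        printf("\n");
    }
    return 0;
}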
/* case 2: test LogSoftmaxBackward function.
* dE/dx = dE/dy * dy/dx
* log softmax: y_i = log(e^{x_i} / \sum_{k} e^{x_k})
*/
bool TestLogSoftmax2()
{
/* an input tensor of size (3) */
int sOrder = 1;
int * sDimSize = new int[sOrder];
sDimSize[0] = 3;
int sUnitNum = 1;
for (int i = 0; i < sOrder; i++)
sUnitNum *= sDimSize[i];
DTYPE xData[3] = {0.0, 1.0, 2.0};
DTYPE gData[3] = {0.5, 0.8, 1.5};
DTYPE yAnswer[3] = {-2.4076, -1.4076, -0.4076};
DTYPE dedxAnswer[3] = {-0.409969, -0.555272, -0.834759};
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * x = NewTensor(sOrder, sDimSize);
XTensor * y = NewTensor(sOrder, sDimSize);
XTensor * g = NewTensor(sOrder, sDimSize);
XTensor * dedy = NewTensor(sOrder, sDimSize);
XTensor * dedx = NewTensor(sOrder, sDimSize);
/* initialize variables */
x->SetData(xData, sUnitNum);
g->SetData(gData, sUnitNum);
y->SetZeroAll();
dedx->SetZeroAll();
dedy->SetZeroAll();
/* call LogSoftmax function */
LogSoftmax(x, y, 0);
/* call LogSoftmaxBackward function */
LogSoftmaxBackward(g, y, x, dedy, dedx, 0, CROSSENTROPY);
/* check result */
cpuTest = y->CheckData(yAnswer, sUnitNum) && dedx->CheckData(dedxAnswer, sUnitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
XTensor * xGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * yGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * gGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * dedyGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * dedxGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
xGPU->SetData(xData, sUnitNum);
gGPU->SetData(gData, sUnitNum);
yGPU->SetZeroAll();
dedxGPU->SetZeroAll();
dedyGPU->SetZeroAll();
/* call LogSoftmax function */
LogSoftmax(xGPU, yGPU, 0);
/* call LogSoftmaxBackward function */
LogSoftmaxBackward(gGPU, yGPU, xGPU, dedyGPU, dedxGPU, 0, CROSSENTROPY);
/* check result */
gpuTest = yGPU->CheckData(yAnswer, sUnitNum) && dedxGPU->CheckData(dedxAnswer, sUnitNum);
/* destroy variables */
delete x;
delete y;
delete g;
delete dedx;
delete dedy;
delete xGPU;
delete yGPU;
delete gGPU;
delete dedxGPU;
delete dedyGPU;
delete[] sDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete x;
delete y;
delete g;
delete dedx;
delete dedy;
delete[] sDimSize;
return cpuTest;
#endif // USE_CUDA
}
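The dedxAnswer values in case 2 (and in case 3, which runs the same numbers through a (1, 3) tensor) follow the standard log-softmax-plus-cross-entropy gradient, dE/dx_i = softmax(x)_i - g_i (stated here as an assumption about LossBackward's convention, but the numbers match): softmax({0, 1, 2}) = {0.0900, 0.2447, 0.6652}, and subtracting g = {0.5, 0.8, 1.5} gives {-0.4100, -0.5553, -0.8348}.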
/* case 3: test LogSoftmaxBackward function.
* dE/dx = dE/dy * dy/dx
* log softmax: y_i = log(e^{x_i} / \sum_{k} e^{x_k})
*/
bool TestLogSoftmax3()
{
/* a tensor of size (1, 3) */
int sOrder = 2;
int * sDimSize = new int[sOrder];
sDimSize[0] = 1;
sDimSize[1] = 3;
int sUnitNum = 1;
for (int i = 0; i < sOrder; i++)
sUnitNum *= sDimSize[i];
DTYPE xData[1][3] = { {0.0, 1.0, 2.0} };
DTYPE gData[1][3] = { {0.5, 0.8, 1.5} };
DTYPE yAnswer[1][3] = { {-2.4076, -1.4076, -0.4076} };
DTYPE dedxAnswer[1][3] = { {-0.409969, -0.555272, -0.834759} };
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * x = NewTensor(sOrder, sDimSize);
XTensor * y = NewTensor(sOrder, sDimSize);
XTensor * g = NewTensor(sOrder, sDimSize);
XTensor * dedy = NewTensor(sOrder, sDimSize);
XTensor * dedx = NewTensor(sOrder, sDimSize);
/* initialize variables */
x->SetData(xData, sUnitNum);
g->SetData(gData, sUnitNum);
y->SetZeroAll();
dedx->SetZeroAll();
dedy->SetZeroAll();
/* call LogSoftmax function */
LogSoftmax(x, y, 1);
/* call LogSoftmaxBackward function */
LogSoftmaxBackward(g, y, x, dedy, dedx, 1, CROSSENTROPY);
/* check result */
cpuTest = y->CheckData(yAnswer, sUnitNum) && dedx->CheckData(dedxAnswer, sUnitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
XTensor * xGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * yGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * gGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * dedyGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * dedxGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
xGPU->SetData(xData, sUnitNum);
gGPU->SetData(gData, sUnitNum);
yGPU->SetZeroAll();
dedxGPU->SetZeroAll();
dedyGPU->SetZeroAll();
/* call LogSoftmax function */
LogSoftmax(xGPU, yGPU, 1);
/* call LogSoftmaxBackward function */
LogSoftmaxBackward(gGPU, yGPU, xGPU, dedyGPU, dedxGPU, 1, CROSSENTROPY);
/* check result */
gpuTest = yGPU->CheckData(yAnswer, sUnitNum) && dedxGPU->CheckData(dedxAnswer, sUnitNum);
/* destroy variables */
delete x;
delete y;
delete g;
delete dedx;
delete dedy;
delete xGPU;
delete yGPU;
delete gGPU;
delete dedxGPU;
delete dedyGPU;
delete[] sDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete x;
delete y;
delete g;
delete dedx;
delete dedy;
delete[] sDimSize;
return cpuTest;
#endif // USE_CUDA
}
/* other cases */
/*
TODO!!
*/
/* test for LogSoftmax Function */
extern "C"
bool TestLogSoftmax()
{
XPRINT(0, stdout, "[TEST LogSoftmax] test log softmax function and its backward computation \n");
bool returnFlag = true, caseFlag = true;
/* case 1 test */
caseFlag = TestLogSoftmax1();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 1 failed!\n");
}
else
XPRINT(0, stdout, ">> case 1 passed!\n");
///* case 2 test */
//caseFlag = TestLogSoftmax2();
//if (!caseFlag) {
// returnFlag = false;
// XPRINT(0, stdout, ">> case 2 failed!\n");
//}
//else
// XPRINT(0, stdout, ">> case 2 passed!\n");
/* case 3 test */
caseFlag = TestLogSoftmax3();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 3 failed!\n");
}
else
XPRINT(0, stdout, ">> case 3 passed!\n");
/* other cases test */
/*
TODO!!
*/
if (returnFlag) {
XPRINT(0, stdout, ">> All Passed!\n");
}
else
XPRINT(0, stdout, ">> Failed!\n");
XPRINT(0, stdout, "\n");
return returnFlag;
}
} // namespace nts(NiuTrans.Tensor)
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northestern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-07-02
*/
#ifndef __TEST_LOGSOFTMAX_H__
#define __TEST_LOGSOFTMAX_H__
#include "../function/LogSoftmax.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* test for LogSoftmax Function */
extern "C"
bool TestLogSoftmax();
} // namespace nts(NiuTrans.Tensor)
#endif // __TEST_LOGSOFTMAX_H__
@@ -19,91 +19,240 @@
 * $Created by: LI Yinqiao (email: li.yin.qiao.2012@hotmail.com) 2018-04-30
 */

-#include "../XTensor.h"
-#include "../XDevice.h"
+#include "../core/ScaleAndShift.h"
 #include "../function/Loss.h"

-namespace nts { // namespace nt(NiuTrans.Tensor)
+namespace nts { // namespace nts(NiuTrans.Tensor)

-/* case 1 */
+/* case 1: test LossCompute function
+* In this case, Loss function name = SQUAREDERROR.
+* loss = sum_{i} 0.5*(t_i - y_i)^2,
+* where t_i is the gold standard and y_i is the model output
+*/
 bool TestLoss1()
 {
-    /* a tensor of size 10000 * 1 */
+    /* a tensor of size (10, 1) */
+    int order = 2;
+    int * dimSize = new int[order];
+    dimSize[0] = 10;
+    dimSize[1] = 1;
+    int unitNum = 1;
+    for (int i = 0; i < order; i++)
+        unitNum *= dimSize[i];
+
+    /* CPU test */
+    bool cpuTest = true;
+    DTYPE answer = 5.0F;
+
+    /* create tensors */
+    XTensor * output = NewTensor(order, dimSize);
+    XTensor * gold = NewTensor(order, dimSize);
+
+    /* initialize variables */
+    output->SetZeroAll();
+    gold->SetZeroAll();
+    ScaleAndShift(output, 1, 1);
+    ScaleAndShift(gold, 1, 2);
+
+    DTYPE error;
+    error = LossCompute(gold, output, SQUAREDERROR, false, 0, 0, dimSize[0], 0);
+
+    /* check results */
+    cpuTest = (error == answer);
+
+#ifdef USE_CUDA
+    /* GPU test */
+    bool gpuTest = true;
+
+    /* create tensor */
+    XTensor * outputGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+    XTensor * goldGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+
+    /* Initialize variables */
+    outputGPU->SetZeroAll();
+    goldGPU->SetZeroAll();
+    ScaleAndShift(outputGPU, 1, 1);
+    ScaleAndShift(goldGPU, 1, 2);
+
+    /* call LossCompute function */
+    error = LossCompute(goldGPU, outputGPU, SQUAREDERROR, false, 0, 0, dimSize[0], 0);
+
+    /* check results */
+    gpuTest = (error == answer);
+
+    /* destroy variables */
+    delete output;
+    delete gold;
+    delete outputGPU;
+    delete goldGPU;
+    delete[] dimSize;
+
+    return cpuTest && gpuTest;
+#else
+    /* destroy variables */
+    delete output;
+    delete gold;
+    delete[] dimSize;
+
+    return cpuTest;
+#endif // USE_CUDA
+}
+
+/* case 2: test LossCompute function
+* In this case, Loss function name = CROSSENTROPY.
+* loss = sum_{i} (-t_i * log(y_i))
+* where t_i is the gold standard and y_i is the model output
+*/
+bool TestLoss2()
+{
+    /* a tensor of size (10, 1) */
+    int order = 2;
+    int * dimSize = new int[order];
+    dimSize[0] = 10;
+    dimSize[1] = 1;
+    int unitNum = 1;
+    for (int i = 0; i < order; i++)
+        unitNum *= dimSize[i];
+
+    /* CPU test */
+    bool cpuTest = true;
+    DTYPE answer = 0.0F;
+
+    /* create tensors */
+    XTensor * output = NewTensor(order, dimSize);
+    XTensor * gold = NewTensor(order, dimSize);
+
+    /* initialize variables */
+    output->SetZeroAll();
+    gold->SetZeroAll();
+    ScaleAndShift(output, 1, 1);
+    ScaleAndShift(gold, 1, 2);
+
+    DTYPE error;
+    error = LossCompute(gold, output, CROSSENTROPY, false, 0, 0, dimSize[0], 0);
+
+    /* check results */
+    cpuTest = (error == answer);
+
+#ifdef USE_CUDA
+    /* GPU test */
+    bool gpuTest = true;
+
+    /* create tensor */
+    XTensor * outputGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+    XTensor * goldGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+
+    /* Initialize variables */
+    outputGPU->SetZeroAll();
+    goldGPU->SetZeroAll();
+    ScaleAndShift(outputGPU, 1, 1);
+    ScaleAndShift(goldGPU, 1, 2);
+
+    /* call LossCompute function */
+    error = LossCompute(goldGPU, outputGPU, CROSSENTROPY, false, 0, 0, dimSize[0], 0);
+
+    /* check results */
+    gpuTest = (error == answer);
+
+    /* destroy variables */
+    delete output;
+    delete gold;
+    delete outputGPU;
+    delete goldGPU;
+    delete[] dimSize;
+
+    return cpuTest && gpuTest;
+#else
+    /* destroy variables */
+    delete output;
+    delete gold;
+    delete[] dimSize;
+
+    return cpuTest;
+#endif // USE_CUDA
+}
+
+/* case 3: test LossCompute function
+* In this case, Loss function name = ONEHOTERROR.
+* loss = sum_{i} e_i
+* where e_i = 0.5*(t_i - y_i)^2 if t_i = 1, e_i = 0 otherwise
+*/
+bool TestLoss3()
+{
+    /* a tensor of size (10, 1) */
     int order = 2;
-    int order_reduce = 1;
     int * dimSize = new int[order];
-    dimSize[0] = 10000;
+    dimSize[0] = 5;
     dimSize[1] = 1;
     int unitNum = 1;
     for (int i = 0; i < order; i++)
         unitNum *= dimSize[i];

+    DTYPE outputData[5][1] = { {0.5},
+                               {0.5},
+                               {0.5},
+                               {0.5},
+                               {0.5} };
+    DTYPE goldData[5][1] = { {1.0},
+                             {1.0},
+                             {0.0},
+                             {0.0},
+                             {0.0} };
+
     /* CPU test */
     bool cpuTest = true;
+    DTYPE answer = 0.25F;

     /* create tensors */
-    XTensor * a = NewTensor(order, dimSize);
-    XTensor * b = NewTensor(order, dimSize);
+    XTensor * output = NewTensor(order, dimSize);
+    XTensor * gold = NewTensor(order, dimSize);

     /* initialize variables */
-    DTYPE* a_data = (DTYPE*)a->data;
-    for (int i = 0; i < unitNum; i++)
-        *a_data++ = 1;
-    DTYPE* b_data = (DTYPE*)b->data;
-    for (int i = 0; i < unitNum; i++)
-        *b_data++ = 1;
-
-    DTYPE error = 0.0F;
-    error = LossCompute(a, b, SQUAREDERROR, false, 1, 0, dimSize[0], 0);
-    printf("%d", error);
-    /* call reduce max function */
-    //ReduceMax(a, reduce_a, 0);
-    //ReduceMax(b, reduce_b, 1);
-    //DTYPE* reduce_a_data = (DTYPE*)reduce_a->data;
-    //for (int i = 0; i < unitNum_a; i++)
-    //    printf("%f ", *reduce_a_data++);
-    //printf("\n");
-    //DTYPE* reduce_b_data = (DTYPE*)reduce_b->data;
-    //for (int i = 0; i < unitNum_b; i++)
-    //    printf("%f ", *reduce_b_data++);
+    output->SetData(outputData, unitNum);
+    gold->SetData(goldData, unitNum);
+
+    DTYPE error;
+    error = LossCompute(gold, output, ONEHOTERROR, false, 0, 0, dimSize[0], 0);

     /* check results */
-    cpuTest = true;
+    cpuTest = (error == answer);

#ifdef USE_CUDA
     /* GPU test */
     bool gpuTest = true;

     /* create tensor */
-    XTensor * aGPU = NewTensor(order, dimSize, X_FLOAT);
-    XTensor * bGPU = NewTensor(order, dimSize, X_FLOAT);
+    XTensor * outputGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+    XTensor * goldGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);

     /* Initialize variables */
-    DTYPE* aGPU_data = (DTYPE*)aGPU->data;
-    for (int i = 0; i < unitNum; i++)
-        *aGPU_data++ = 1;
-    DTYPE* bGPU_data = (DTYPE*)bGPU->data;
-    for (int i = 0; i < unitNum; i++)
-        *bGPU_data++ = 1;
-    error = LossCompute(a, b, SQUAREDERROR, false, 1, 0, dimSize[0], 0);
-    printf("%d", error);
-    /* call reduce max function */
-    //ReduceMax(aGPU, reduce_aGPU, 0);
-    //ReduceMax(bGPU, reduce_bGPU, 1);
+    outputGPU->SetData(outputData, unitNum);
+    goldGPU->SetData(goldData, unitNum);
+
+    /* call LossCompute function */
+    error = LossCompute(goldGPU, outputGPU, ONEHOTERROR, false, 0, 0, dimSize[0], 0);

     /* check results */
-    gpuTest = true;
+    gpuTest = (error == answer);

     /* destroy variables */
-    delete aGPU, bGPU;
+    delete output;
+    delete gold;
+    delete outputGPU;
+    delete goldGPU;
     delete[] dimSize;

     return cpuTest && gpuTest;
#else
     /* destroy variables */
-    delete a;
-    delete b;
+    delete output;
+    delete gold;
+    delete[] dimSize;

     return cpuTest;
#endif // USE_CUDA
 }

@@ -113,11 +262,11 @@ bool TestLoss1()
 TODO!!
 */

-/* test for Sum Function */
+/* test for Loss Function */
 extern "C"
 bool TestLoss()
 {
-    XPRINT(0, stdout, "[TEST Loss]\n");
+    XPRINT(0, stdout, "[TEST Loss] compute the loss \n");
     bool returnFlag = true, caseFlag = true;

     /* case 1 test */

@@ -129,6 +278,23 @@ extern "C"
     else
         XPRINT(0, stdout, ">> case 1 passed!\n");

+    /* case 2 test */
+    caseFlag = TestLoss2();
+    if (!caseFlag) {
+        returnFlag = false;
+        XPRINT(0, stdout, ">> case 2 failed!\n");
+    }
+    else
+        XPRINT(0, stdout, ">> case 2 passed!\n");
+
+    caseFlag = TestLoss3();
+    if (!caseFlag) {
+        returnFlag = false;
+        XPRINT(0, stdout, ">> case 3 failed!\n");
+    }
+    else
+        XPRINT(0, stdout, ">> case 3 passed!\n");
+
     ///* other cases test */
     ///*
     //TODO!!

@@ -145,4 +311,4 @@ extern "C"
     return returnFlag;
 }

-} // namespace nt(NiuTrans.Tensor)
+} // namespace nts(NiuTrans.Tensor)
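The three expected losses can be checked by hand. Case 1 (SQUAREDERROR): output is all 1s and gold all 2s, so loss = 10 * 0.5 * (2 - 1)^2 = 5.0. Case 2 (CROSSENTROPY): loss = sum of -2 * log(1) = 0.0. Case 3 (ONEHOTERROR): only the two rows with t_i = 1 contribute, giving 2 * 0.5 * (1 - 0.5)^2 = 0.25. A standalone sketch of the same arithmetic (plain C++, mirroring the formulas in the comments rather than calling LossCompute):

#include <cmath>
#include <cstdio>

int main()
{
    /* case 1: squared error over 10 units, output 1, gold 2 */
    float se = 0.0F;
    for (int i = 0; i < 10; i++)
        se += 0.5F * (2.0F - 1.0F) * (2.0F - 1.0F);            /* -> 5.0 */

    /* case 2: cross entropy over 10 units, output 1, gold 2 */
    float ce = 0.0F;
    for (int i = 0; i < 10; i++)
        ce += -2.0F * std::log(1.0F);                          /* -> 0.0 */

    /* case 3: one-hot error, counted only where gold == 1 */
    float gold[5] = {1, 1, 0, 0, 0};
    float out[5] = {0.5F, 0.5F, 0.5F, 0.5F, 0.5F};
    float oh = 0.0F;
    for (int i = 0; i < 5; i++)
        if (gold[i] == 1.0F)
            oh += 0.5F * (gold[i] - out[i]) * (gold[i] - out[i]);  /* -> 0.25 */

    printf("%g %g %g\n", se, ce, oh);
    return 0;
}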
@@ -26,9 +26,9 @@
 namespace nts { // namespace nts(NiuTrans.Tensor)

-/* test for Sum Function */
+/* test for Loss Function */
 extern "C"
 bool TestLoss();

 } // namespace nts(NiuTrans.Tensor)

-#endif // __TEST_SUM_H__
+#endif // __TEST_LOSS_H__
@@ -19,13 +19,12 @@
 * $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-15
 */

-#include "../XTensor.h"
 #include "TMatrixMULBatchedCPU.h"

 namespace nts { // namespace nts(NiuTrans.Tensor)

 /* case 1: matrix multiplication in batch mode (CPU code).
-* In this case, aList=2*(2, 3), bList=2*(2, 3) -> c=2*(2, 2),
-transposedA=X_NOTRANS, transposedB=X_NOTRANS.
+* In this case, aList=2*(2, 3), bList=2*(3, 2) -> c=2*(2, 2),
+* transposedA=X_NOTRANS, transposedB=X_NOTRANS.
 */
 bool TestMatrixMulBatchedCPU1()
 {

@@ -110,18 +109,12 @@ bool TestMatrixMulBatchedCPU1()
     MatrixMULBatchedCPU(aList, X_NOTRANS, bList, X_NOTRANS, cList);

     /* check results */
-    cpuTest = c1->CheckData(answer1, cUnitNum) && cpuTest;
-    cpuTest = c2->CheckData(answer2, cUnitNum) && cpuTest;
+    cpuTest = c1->CheckData(answer1, cUnitNum) && c2->CheckData(answer2, cUnitNum);

#ifdef USE_CUDA
     /* GPU test */
     bool gpuTest = true;

-    /* clear list */
-    aList->Clear();
-    bList->Clear();
-    cList->Clear();

     /* create tensors */
     XTensor * aGPU1 = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
     XTensor * aGPU2 = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);

@@ -137,32 +130,56 @@ bool TestMatrixMulBatchedCPU1()
     bGPU2->SetData(bData2, aUnitNum);
     cGPU1->SetZeroAll();
     cGPU2->SetZeroAll();

+    /* clear list */
+    aList->Clear();
+    bList->Clear();
+    cList->Clear();

     /* add tensors to list */
-    aList->Add(a1);
-    aList->Add(a2);
-    bList->Add(b1);
-    bList->Add(b2);
-    cList->Add(c1);
-    cList->Add(c2);
+    aList->Add(aGPU1);
+    aList->Add(aGPU2);
+    bList->Add(bGPU1);
+    bList->Add(bGPU2);
+    cList->Add(cGPU1);
+    cList->Add(cGPU2);

     /* call MatrixMULBatchedCPU function */
     MatrixMULBatchedCPU(aList, X_NOTRANS, bList, X_NOTRANS, cList);

     /* check results */
-    gpuTest = c1->CheckData(answer1, cUnitNum) && gpuTest;
-    gpuTest = c2->CheckData(answer2, cUnitNum) && gpuTest;
+    gpuTest = cGPU1->CheckData(answer1, cUnitNum) && gpuTest;
+    gpuTest = cGPU2->CheckData(answer2, cUnitNum) && gpuTest;

     /* destroy variables */
-    delete a1, a2, b1, b2, c1, c2;
-    delete aGPU1, aGPU2, bGPU1, bGPU2, cGPU1, cGPU2;
-    delete[] aDimSize, bDimSize, cDimSize;
+    delete a1;
+    delete a2;
+    delete b1;
+    delete b2;
+    delete c1;
+    delete c2;
+    delete aGPU1;
+    delete aGPU2;
+    delete bGPU1;
+    delete bGPU2;
+    delete cGPU1;
+    delete cGPU2;
+    delete[] aDimSize;
+    delete[] bDimSize;
+    delete[] cDimSize;

     return cpuTest && gpuTest;
#else
     /* destroy variables */
-    delete a1, a2, b1, b2, c1, c2;
-    delete[] aDimSize, bDimSize, cDimSize;
+    delete a1;
+    delete a2;
+    delete b1;
+    delete b2;
+    delete c1;
+    delete c2;
+    delete[] aDimSize;
+    delete[] bDimSize;
+    delete[] cDimSize;

     return cpuTest;
#endif // USE_CUDA

@@ -177,7 +194,7 @@ bool TestMatrixMulBatchedCPU1()
 extern "C"
 bool TestMatrixMulBatchedCPU()
 {
-    XPRINT(0, stdout, "[TEST MATRIXMULBATCHEDCPU] -------------\n");
+    XPRINT(0, stdout, "[TEST MATRIXMULBATCHEDCPU] matrix multiplication in batch mode (CPU code) \n");
     bool returnFlag = true, caseFlag = true;

     /* case 1 test */

@@ -190,15 +207,6 @@ bool TestMatrixMulBatchedCPU()
     else
         XPRINT(0, stdout, ">> case 1 passed!\n");

-    ///* case 2 test */
-    //caseFlag = TestMatrixMulBatchedCPU2();
-    //if (!caseFlag) {
-    //    returnFlag = false;
-    //    XPRINT(0, stdout, ">> case 2 failed!\n");
-    //}
-    //else
-    //    XPRINT(0, stdout, ">> case 2 passed!\n");

     /* other cases test */
     /*
     TODO!!
...
@@ -19,7 +19,6 @@
 * $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-14
 */

-#include "../XTensor.h"
 #include "TMatrixMul.h"

 namespace nts { // namespace nts(NiuTrans.Tensor)

@@ -59,13 +58,13 @@ bool TestMatrixMul1()
     for (int i = 0; i < tOrder; i++)
         tUnitNum *= tDimSize[i];

-    DTYPE sData1[2][3] = { {1.0F, 2.0F, 3.0F},
-                           {-4.0F, 5.0F, 6.0F} };
-    DTYPE sData2[3][2] = { {0.0F, -1.0F},
-                           {1.0F, 2.0F},
-                           {2.0F, 1.0F} };
-    DTYPE answer[2][2] = { {8.0F, 6.0F},
-                           {17.0F, 20.0F} };
+    DTYPE sData1[2][3] = { {1.0, 2.0, 3.0},
+                           {-4.0, 5.0, 6.0} };
+    DTYPE sData2[3][2] = { {0.0, -1.0},
+                           {1.0, 2.0},
+                           {2.0, 1.0} };
+    DTYPE answer[2][2] = { {8.0, 6.0},
+                           {17.0, 20.0} };

     /* CPU test */
     bool cpuTest = true;

@@ -167,14 +166,14 @@ bool TestMatrixMul2()
     for (int i = 0; i < tOrder; i++)
         tUnitNum *= tDimSize[i];

-    DTYPE sData1[3][2] = { {1.0F, -4.0F},
-                           {2.0F, 5.0F},
-                           {3.0F, 6.0F} };
-    DTYPE sData2[3][2] = { {0.0F, -1.0F},
-                           {1.0F, 2.0F},
-                           {2.0F, 1.0F} };
-    DTYPE answer[2][2] = { {8.0F, 6.0F},
-                           {17.0F, 20.0F} };
+    DTYPE sData1[3][2] = { {1.0, -4.0},
+                           {2.0, 5.0},
+                           {3.0, 6.0} };
+    DTYPE sData2[3][2] = { {0.0, -1.0},
+                           {1.0, 2.0},
+                           {2.0, 1.0} };
+    DTYPE answer[2][2] = { {8.0, 6.0},
+                           {17.0, 20.0} };

     /* CPU test */
     bool cpuTest = true;

@@ -280,30 +279,30 @@ bool TestMatrixMul3()
     for (int i = 0; i < tOrder; i++)
         tUnitNum *= tDimSize[i];

-    DTYPE sData1[3][2][3] = { { {0.0F, -1.0F, 2.0},
-                                {2.0F, 1.0F, 3.0} },
-                              { {1.0F, 2.0F, 4.0},
-                                {3.0F, 1.0F, 2.0}},
-                              { {-1.0F, 3.0F, 2.0},
-                                {1.0F, -1.0F, 0.0} } };
-    DTYPE sData2[2][3][2] = { { {1.0F, 2.0F},
-                                {-4.0F, 3.0F},
-                                {2.0F, 6.0F} },
-                              { {1.0F, 2.0F},
-                                {3.0F, 4.0F},
-                                {5.0F, 6.0F} } };
-    DTYPE answer[3][2][2][2] = { { { {8.0F, 9.0F},
-                                     {4.0F, 25.0F} },
-                                   { {7.0F, 8.0F},
-                                     {20.0F, 26.0F} } },
-                                 { { {1.0F, 32.0F},
-                                     {3.0F, 21.0F} },
-                                   { {27.0F, 34.0F},
-                                     {16.0F, 22.0F} } },
-                                 { { {-9.0F, 19.0F},
-                                     {5.0F, -1.0F} },
-                                   { {18.0F, 22.0F},
-                                     {-2.0F, -2.0F} } } };
+    DTYPE sData1[3][2][3] = { { {0.0, -1.0, 2.0},
+                                {2.0, 1.0, 3.0} },
+                              { {1.0, 2.0, 4.0},
+                                {3.0, 1.0, 2.0}},
+                              { {-1.0, 3.0, 2.0},
+                                {1.0, -1.0, 0.0} } };
+    DTYPE sData2[2][3][2] = { { {1.0, 2.0},
+                                {-4.0, 3.0},
+                                {2.0, 6.0} },
+                              { {1.0, 2.0},
+                                {3.0, 4.0},
+                                {5.0, 6.0} } };
+    DTYPE answer[3][2][2][2] = { { { {8.0, 9.0},
+                                     {4.0, 25.0} },
+                                   { {7.0, 8.0},
+                                     {20.0, 26.0} } },
+                                 { { {1.0, 32.0},
+                                     {3.0, 21.0} },
+                                   { {27.0, 34.0},
+                                     {16.0, 22.0} } },
+                                 { { {-9.0, 19.0},
+                                     {5.0, -1.0} },
+                                   { {18.0, 22.0},
+                                     {-2.0, -2.0} } } };

     /* CPU test */
     bool cpuTest = true;

@@ -407,21 +406,21 @@ bool TestMatrixMul4()
     for (int i = 0; i < tOrder; i++)
         tUnitNum *= tDimSize[i];

-    DTYPE sData1[3][2][3] = { { {0.0F, -1.0F, 2.0F},
-                                {2.0F, 1.0F, 3.0F} },
-                              { {1.0F, 2.0F, 4.0F},
-                                {3.0F, 1.0F, 2.0F}},
-                              { {-1.0F, 3.0F, 2.0F},
-                                {1.0F, -1.0F, 0.0F} } };
-    DTYPE sData2[3][2] = { {1.0F, 2.0F},
-                           {3.0F, 4.0F},
-                           {5.0F, 6.0F} };
-    DTYPE answer[3][2][2] = { { {7.0F, 8.0F},
-                                {20.0F, 26.0F} },
-                              { {27.0F, 34.0F},
-                                {16.0F, 22.0F} },
-                              { {18.0F, 22.0F},
-                                {-2.0F, -2.0F} } };
+    DTYPE sData1[3][2][3] = { { {0.0, -1.0, 2.0},
+                                {2.0, 1.0, 3.0} },
+                              { {1.0, 2.0, 4.0},
+                                {3.0, 1.0, 2.0}},
+                              { {-1.0, 3.0, 2.0},
+                                {1.0, -1.0, 0.0} } };
+    DTYPE sData2[3][2] = { {1.0, 2.0},
+                           {3.0, 4.0},
+                           {5.0, 6.0} };
+    DTYPE answer[3][2][2] = { { {7.0, 8.0},
+                                {20.0, 26.0} },
+                              { {27.0, 34.0},
+                                {16.0, 22.0} },
+                              { {18.0, 22.0},
+                                {-2.0, -2.0} } };

     /* CPU test */
     bool cpuTest = true;
...
@@ -19,13 +19,12 @@
 * $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-15
 */

-#include "../XTensor.h"
 #include "TMatrixMul2D.h"

 namespace nts { // namespace nts(NiuTrans.Tensor)

 /* case 1: matrix multiplication (for 2d tensors).
-* In this case, a=(2, 3), b=(3, 2) -> c=(2, 2), transposedA=X_NOTRANS,
-transposedB=X_NOTRANS.
+* In this case, a=(2, 3), b=(3, 2) -> c=(2, 2),
+* transposedA=X_NOTRANS, transposedB=X_NOTRANS.
 */
 bool TestMatrixMul2D1()
 {

@@ -107,22 +106,33 @@ bool TestMatrixMul2D1()
     gpuTest = tGPU->CheckData(answer, tUnitNum);

     /* destroy variables */
-    delete s1, s2, t, sGPU1, sGPU2, tGPU;
-    delete[] sDimSize1, sDimSize2, tDimSize;
+    delete s1;
+    delete s2;
+    delete t;
+    delete sGPU1;
+    delete sGPU2;
+    delete tGPU;
+    delete[] sDimSize1;
+    delete[] sDimSize2;
+    delete[] tDimSize;

     return cpuTest && gpuTest;
#else
     /* destroy variables */
-    delete s1, s2, t;
-    delete[] sDimSize1, sDimSize2, tDimSize;
+    delete s1;
+    delete s2;
+    delete t;
+    delete[] sDimSize1;
+    delete[] sDimSize2;
+    delete[] tDimSize;

     return cpuTest;
#endif // USE_CUDA
 }

 /* case 2: matrix multiplication (for 2d tensors).
-* In this case, a=(3, 2), b=(2, 3) -> c=(2, 2), transposedA=X_TRANS,
-transposedB=X_NOTRANS.
+* In this case, a=(3, 2), b=(3, 2) -> c=(2, 2),
+* transposedA=X_TRANS, transposedB=X_NOTRANS.
 */
 bool TestMatrixMul2D2()
 {

@@ -205,14 +215,25 @@ bool TestMatrixMul2D2()
     gpuTest = tGPU->CheckData(answer, tUnitNum);

     /* destroy variables */
-    delete s1, s2, t, sGPU1, sGPU2, tGPU;
-    delete[] sDimSize1, sDimSize2, tDimSize;
+    delete s1;
+    delete s2;
+    delete t;
+    delete sGPU1;
+    delete sGPU2;
+    delete tGPU;
+    delete[] sDimSize1;
+    delete[] sDimSize2;
+    delete[] tDimSize;

     return cpuTest && gpuTest;
#else
     /* destroy variables */
-    delete s1, s2, t;
-    delete[] sDimSize1, sDimSize2, tDimSize;
+    delete s1;
+    delete s2;
+    delete t;
+    delete[] sDimSize1;
+    delete[] sDimSize2;
+    delete[] tDimSize;

     return cpuTest;
#endif // USE_CUDA

@@ -227,7 +248,7 @@ bool TestMatrixMul2D2()
 extern "C"
 bool TestMatrixMul2D()
 {
-    XPRINT(0, stdout, "[TEST MATRIXMUL2D] -------------\n");
+    XPRINT(0, stdout, "[TEST MATRIXMUL2D] matrix multiplication (for 2d tensors) \n");
     bool returnFlag = true, caseFlag = true;

     /* case 1 test */
...
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northestern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-07-06
*/
#include "TMatrixMul2DParallel.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1: matrix multiplication (for 2d tensors) with multi-threading.
* In this case, a=(2, 3), b=(3, 2) -> c=(2, 2),
* transposedA=X_NOTRANS, transposedB=X_NOTRANS.
*/
bool TestMatrixMul2DParallel1()
{
/* a source tensor of size (2, 3) */
int sOrder1 = 2;
int * sDimSize1 = new int[sOrder1];
sDimSize1[0] = 2;
sDimSize1[1] = 3;
int sUnitNum1 = 1;
for (int i = 0; i < sOrder1; i++)
sUnitNum1 *= sDimSize1[i];
/* a source tensor of size (3, 2) */
int sOrder2 = 2;
int * sDimSize2 = new int[sOrder2];
sDimSize2[0] = 3;
sDimSize2[1] = 2;
int sUnitNum2 = 1;
for (int i = 0; i < sOrder2; i++)
sUnitNum2 *= sDimSize2[i];
/* a target tensor of size (2, 2) */
int tOrder = 2;
int * tDimSize = new int[tOrder];
tDimSize[0] = 2;
tDimSize[1] = 2;
int tUnitNum = 1;
for (int i = 0; i < tOrder; i++)
tUnitNum *= tDimSize[i];
DTYPE sData1[2][3] = { {1.0, 2.0, 3.0},
{-4.0, 5.0, 6.0} };
DTYPE sData2[3][2] = { {0.0, -1.0},
{1.0, 2.0},
{2.0, 1.0} };
DTYPE answer[2][2] = { {8.0, 6.0},
{17.0, 20.0} };
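/* the expected entries follow c[i][j] = sum_k sData1[i][k] * sData2[k][j],
   e.g. c[0][0] = 1*0 + 2*1 + 3*2 = 8 and c[1][1] = (-4)*(-1) + 5*2 + 6*1 = 20 */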
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * s1 = NewTensor(sOrder1, sDimSize1);
XTensor * s2 = NewTensor(sOrder2, sDimSize2);
XTensor * t = NewTensor(tOrder, tDimSize);
/* initialize variables */
s1->SetData(sData1, sUnitNum1);
s2->SetData(sData2, sUnitNum2);
t->SetZeroAll();
/* call MatrixMul2DParallel function */
MatrixMul2DParallel(s1, X_NOTRANS, s2, X_NOTRANS, t);
/* check results */
cpuTest = t->CheckData(answer, tUnitNum);
/* destroy variables */
delete s1;
delete s2;
delete t;
delete[] sDimSize1;
delete[] sDimSize2;
delete[] tDimSize;
return cpuTest;
}
/* case 2: matrix multiplication (for 2d tensors) with multi-threading.
* In this case, a=(3, 2), b=(3, 2) -> c=(2, 2),
* transposedA=X_TRANS, transposedB=X_NOTRANS.
*/
bool TestMatrixMul2DParallel2()
{
/* a source tensor of size (3, 2) */
int sOrder1 = 2;
int * sDimSize1 = new int[sOrder1];
sDimSize1[0] = 3;
sDimSize1[1] = 2;
int sUnitNum1 = 1;
for (int i = 0; i < sOrder1; i++)
sUnitNum1 *= sDimSize1[i];
/* a source tensor of size (3, 2) */
int sOrder2 = 2;
int * sDimSize2 = new int[sOrder2];
sDimSize2[0] = 3;
sDimSize2[1] = 2;
int sUnitNum2 = 1;
for (int i = 0; i < sOrder2; i++)
sUnitNum2 *= sDimSize2[i];
/* a target tensor of size (2, 2) */
int tOrder = 2;
int * tDimSize = new int[tOrder];
tDimSize[0] = 2;
tDimSize[1] = 2;
int tUnitNum = 1;
for (int i = 0; i < tOrder; i++)
tUnitNum *= tDimSize[i];
DTYPE sData1[3][2] = { {1.0, -4.0},
{2.0, 5.0},
{3.0, 6.0} };
DTYPE sData2[3][2] = { {0.0, -1.0},
{1.0, 2.0},
{2.0, 1.0} };
DTYPE answer[2][2] = { {8.0, 6.0},
{17.0, 20.0} };
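/* sData1 here is the transpose of sData1 in case 1, so with
   transposedA=X_TRANS the expected product is identical to case 1 */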
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * s1 = NewTensor(sOrder1, sDimSize1);
XTensor * s2 = NewTensor(sOrder2, sDimSize2);
XTensor * t = NewTensor(tOrder, tDimSize);
/* initialize variables */
s1->SetData(sData1, sUnitNum1);
s2->SetData(sData2, sUnitNum2);
t->SetZeroAll();
/* call MatrixMul2DParallel function */
MatrixMul2DParallel(s1, X_TRANS, s2, X_NOTRANS, t);
/* check results */
cpuTest = t->CheckData(answer, tUnitNum);
/* destroy variables */
delete s1;
delete s2;
delete t;
delete[] sDimSize1;
delete[] sDimSize2;
delete[] tDimSize;
return cpuTest;
}
/* other cases */
/*
TODO!!
*/
/* test for MatrixMul2DParallel Function */
extern "C"
bool TestMatrixMul2DParallel()
{
XPRINT(0, stdout, "[TEST MatrixMul2DParallel] matrix multiplication (for 2d tensors) with multi-threading \n");
bool returnFlag = true, caseFlag = true;
/* case 1 test */
caseFlag = TestMatrixMul2DParallel1();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 1 failed!\n");
}
else
XPRINT(0, stdout, ">> case 1 passed!\n");
/* case 2 test */
caseFlag = TestMatrixMul2DParallel2();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 2 failed!\n");
}
else
XPRINT(0, stdout, ">> case 2 passed!\n");
/* other cases test */
/*
TODO!!
*/
if (returnFlag) {
XPRINT(0, stdout, ">> All Passed!\n");
}
else
XPRINT(0, stdout, ">> Failed!\n");
XPRINT(0, stdout, "\n");
return returnFlag;
}
} // namespace nts(NiuTrans.Tensor)
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-07-06
*/
#ifndef __TEST_MATRIXMUL2DPARALLEL_H__
#define __TEST_MATRIXMUL2DPARALLEL_H__
#include "../core/MatrixMul2DParallel.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* test for MatrixMul2DParallel Function */
extern "C"
bool TestMatrixMul2DParallel();
} // namespace nts(NiuTrans.Tensor)
#endif // __TEST_MATRIXMUL2DPARALLEL_H__
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-15
*/
#include "TMatrixMULBatched.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1: matrix multiplication of the two tensors.
* In this case, a=(2, 3), b=(3, 2) -> c=(2, 2),
* transposedA=X_NOTRANS, transposedB=X_NOTRANS.
*/
bool TestMatrixMulBatched1()
{
/* a source tensor of size (2, 3) */
int sOrder1 = 2;
int * sDimSize1 = new int[sOrder1];
sDimSize1[0] = 2;
sDimSize1[1] = 3;
int sUnitNum1 = 1;
for (int i = 0; i < sOrder1; i++)
sUnitNum1 *= sDimSize1[i];
/* a source tensor of size (3, 2) */
int sOrder2 = 2;
int * sDimSize2 = new int[sOrder2];
sDimSize2[0] = 3;
sDimSize2[1] = 2;
int sUnitNum2 = 1;
for (int i = 0; i < sOrder2; i++)
sUnitNum2 *= sDimSize2[i];
/* a target tensor of size (2, 2) */
int tOrder = 2;
int * tDimSize = new int[tOrder];
tDimSize[0] = 2;
tDimSize[1] = 2;
int tUnitNum = 1;
for (int i = 0; i < tOrder; i++)
tUnitNum *= tDimSize[i];
DTYPE sData1[2][3] = { {1.0, 2.0, 3.0},
{-4.0, 5.0, 6.0} };
DTYPE sData2[3][2] = { {0.0, -1.0},
{1.0, 2.0},
{2.0, 1.0} };
DTYPE answer[2][2] = { {8.0, 6.0},
{17.0, 20.0} };
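/* for 2-dimensional inputs the batched product reduces to a single
   matrix product, e.g. c[0][0] = 1*0 + 2*1 + 3*2 = 8 */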
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * s1 = NewTensor(sOrder1, sDimSize1);
XTensor * s2 = NewTensor(sOrder2, sDimSize2);
XTensor * t = NewTensor(tOrder, tDimSize);
/* initialize variables */
s1->SetData(sData1, sUnitNum1);
s2->SetData(sData2, sUnitNum2);
t->SetZeroAll();
/* call MatrixMulBatched function */
MatrixMulBatched(s1, X_NOTRANS, s2, X_NOTRANS, t);
/* check results */
cpuTest = t->CheckData(answer, tUnitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensor */
XTensor * sGPU1 = NewTensor(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
XTensor * sGPU2 = NewTensor(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
/* Initialize variables */
sGPU1->SetData(sData1, sUnitNum1);
sGPU2->SetData(sData2, sUnitNum2);
tGPU->SetZeroAll();
/* call MatrixMulBatched function */
MatrixMulBatched(sGPU1, X_NOTRANS, sGPU2, X_NOTRANS, tGPU);
/* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum);
/* destroy variables */
delete s1;
delete s2;
delete t;
delete sGPU1;
delete sGPU2;
delete tGPU;
delete[] sDimSize1;
delete[] sDimSize2;
delete[] tDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete s1;
delete s2;
delete t;
delete[] sDimSize1;
delete[] sDimSize2;
delete[] tDimSize;
return cpuTest;
#endif // USE_CUDA
}
/* case 2: matrix multiplication of the two tensors.
* In this case, a=(2, 2, 3), b=(2, 3, 2) -> c=(2, 2, 2),
* transposedA=X_NOTRANS, transposedB=X_NOTRANS.
*/
bool TestMatrixMulBatched2()
{
/* a source tensor of size (2, 2, 3) */
int sOrder1 = 3;
int * sDimSize1 = new int[sOrder1];
sDimSize1[0] = 2;
sDimSize1[1] = 2;
sDimSize1[2] = 3;
int sUnitNum1 = 1;
for (int i = 0; i < sOrder1; i++)
sUnitNum1 *= sDimSize1[i];
/* a source tensor of size (2, 3, 2) */
int sOrder2 = 3;
int * sDimSize2 = new int[sOrder2];
sDimSize2[0] = 2;
sDimSize2[1] = 3;
sDimSize2[2] = 2;
int sUnitNum2 = 1;
for (int i = 0; i < sOrder2; i++)
sUnitNum2 *= sDimSize2[i];
/* a target tensor of size (2, 2, 2) */
int tOrder = 3;
int * tDimSize = new int[tOrder];
tDimSize[0] = 2;
tDimSize[1] = 2;
tDimSize[2] = 2;
int tUnitNum = 1;
for (int i = 0; i < tOrder; i++)
tUnitNum *= tDimSize[i];
DTYPE sData1[2][2][3] = { { {0.0, -1.0, 2.0},
{2.0, 1.0, 3.0} },
{ {1.0, 2.0, 4.0},
{3.0, 1.0, 2.0} } };
DTYPE sData2[2][3][2] = { { {1.0, 2.0},
{-4.0, 3.0},
{2.0, 6.0} },
{ {1.0, 2.0},
{3.0, 4.0},
{5.0, 6.0} } };
DTYPE answer[2][2][2] = { { {8.0, 9.0},
{4.0, 25.0} },
{ {27.0, 34.0},
{16.0, 22.0} } };
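/* each batch is multiplied independently, e.g. in batch 0:
   c[0][0][0] = 0*1 + (-1)*(-4) + 2*2 = 8 and c[0][1][1] = 2*2 + 1*3 + 3*6 = 25 */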
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * s1 = NewTensor(sOrder1, sDimSize1);
XTensor * s2 = NewTensor(sOrder2, sDimSize2);
XTensor * t = NewTensor(tOrder, tDimSize);
/* initialize variables */
s1->SetData(sData1, sUnitNum1);
s2->SetData(sData2, sUnitNum2);
t->SetZeroAll();
/* call MatrixMulBatched function */
MatrixMulBatched(s1, X_NOTRANS, s2, X_NOTRANS, t);
/* check results */
cpuTest = t->CheckData(answer, tUnitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensor */
XTensor * sGPU1 = NewTensor(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
XTensor * sGPU2 = NewTensor(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
/* Initialize variables */
sGPU1->SetData(sData1, sUnitNum1);
sGPU2->SetData(sData2, sUnitNum2);
tGPU->SetZeroAll();
/* call MatrixMulBatched function */
MatrixMulBatched(sGPU1, X_NOTRANS, sGPU2, X_NOTRANS, tGPU);
/* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum);
/* destroy variables */
delete s1;
delete s2;
delete t;
delete sGPU1;
delete sGPU2;
delete tGPU;
delete[] sDimSize1;
delete[] sDimSize2;
delete[] tDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete s1;
delete s2;
delete t;
delete[] sDimSize1;
delete[] sDimSize2;
delete[] tDimSize;
return cpuTest;
#endif // USE_CUDA
}
/* other cases */
/*
TODO!!
*/
/* test for TestMatrixMulBatched Function */
extern "C"
bool TestMatrixMulBatched()
{
XPRINT(0, stdout, "[TEST MATRIXMULBATCHED] matrix multiplication of the two tensors \n");
bool returnFlag = true, caseFlag = true;
/* case 1 test */
caseFlag = TestMatrixMulBatched1();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 1 failed!\n");
}
else
XPRINT(0, stdout, ">> case 1 passed!\n");
/* case 2 test */
caseFlag = TestMatrixMulBatched2();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 2 failed!\n");
}
else
XPRINT(0, stdout, ">> case 2 passed!\n");
/* other cases test */
/*
TODO!!
*/
if (returnFlag) {
XPRINT(0, stdout, ">> All Passed!\n");
}
else
XPRINT(0, stdout, ">> Failed!\n");
XPRINT(0, stdout, "\n");
return returnFlag;
}
} // namespace nts(NiuTrans.Tensor)
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-15
*/
#ifndef __TEST_MATRIXMULBATCHED_H__
#define __TEST_MATRIXMULBATCHED_H__
#include "../core/MatrixMulBatched.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* test for MatrixMulBatched Function */
extern "C"
bool TestMatrixMulBatched();
} // namespace nts(NiuTrans.Tensor)
#endif // __TEST_MATRIXMULBATCHED_H__
...@@ -88,21 +88,29 @@ bool TestMerge1() ...@@ -88,21 +88,29 @@ bool TestMerge1()
gpuTest = tGPU->CheckData(answer, tUnitNum); gpuTest = tGPU->CheckData(answer, tUnitNum);
/* destroy variables */ /* destroy variables */
delete s, t, sGPU, tGPU; delete s;
delete[] sDimSize, tDimSize; delete t;
delete sGPU;
delete tGPU;
delete[] sDimSize;
delete[] tDimSize;
return cpuTest && gpuTest; return cpuTest && gpuTest;
#else #else
/* destroy variables */ /* destroy variables */
delete s, t; delete s;
delete[] sDimSize, tDimSize; delete t;
delete[] sDimSize;
delete[] tDimSize;
return cpuTest; return cpuTest;
#endif // USE_CUDA #endif // USE_CUDA
} }
/* case 2: transform a tensor by merging it along a dimension. /* case 2: transform a tensor by merging it along a dimension.
* In this case, (2, 2, 3) -> (4, 3), whereToMerge=1, leadingDim=0. * In this case,
(2, 2, 3) -> (4, 3), whereToMerge=1, leadingDim=0.
(2, 2, 3) -> (2, 6), whereToMerge=2, leadingDim=0.
*/ */
bool TestMerge2() bool TestMerge2()
{ {
...@@ -118,40 +126,55 @@ bool TestMerge2() ...@@ -118,40 +126,55 @@ bool TestMerge2()
sUnitNum *= sDimSize[i]; sUnitNum *= sDimSize[i];
/* a target tensor of size (4, 3) */ /* a target tensor of size (4, 3) */
int tOrder = 2; int tOrder1 = 2;
int * tDimSize = new int[tOrder]; int * tDimSize1 = new int[tOrder1];
tDimSize[0] = 4; tDimSize1[0] = 4;
tDimSize[1] = 3; tDimSize1[1] = 3;
int tUnitNum = 1; int tUnitNum1 = 1;
for (int i = 0; i < tOrder; i++) for (int i = 0; i < tOrder1; i++)
tUnitNum *= tDimSize[i]; tUnitNum1 *= tDimSize1[i];
/* a target tensor of size (2, 6) */
int tOrder2 = 2;
int * tDimSize2 = new int[tOrder2];
tDimSize2[0] = 2;
tDimSize2[1] = 6;
int tUnitNum2 = 1;
for (int i = 0; i < tOrder2; i++)
tUnitNum2 *= tDimSize2[i];
DTYPE sData[2][2][3] = { { {0.0, 1.0, 2.0}, DTYPE sData[2][2][3] = { { {0.0, 1.0, 2.0},
{4.0, 5.0, 6.0} }, {4.0, 5.0, 6.0} },
{ {-1.0, 2.0, 3.0}, { {-1.0, 2.0, 3.0},
{-4.0, -5.0, -6.0} } }; {-4.0, -5.0, -6.0} } };
DTYPE answer[4][3] = { {0.0, 1.0, 2.0}, DTYPE answer1[4][3] = { {0.0, 1.0, 2.0},
{4.0, 5.0, 6.0}, {4.0, 5.0, 6.0},
{-1.0, 2.0, 3.0}, {-1.0, 2.0, 3.0},
{-4.0, -5.0, -6.0} }; {-4.0, -5.0, -6.0} };
DTYPE answer2[2][6] = { {0.0, 1.0, 2.0, -1.0, 2.0, 3.0},
{4.0, 5.0, 6.0, -4.0, -5.0, -6.0} };
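/* with leadingDim=0, whereToMerge=1 stacks the two (2, 3) slices along rows
   to give (4, 3), while whereToMerge=2 places them side by side to give (2, 6) */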
/* CPU test */ /* CPU test */
bool cpuTest = true; bool cpuTest = true;
/* create tensors */ /* create tensors */
XTensor * s = NewTensor(sOrder, sDimSize); XTensor * s = NewTensor(sOrder, sDimSize);
XTensor * t = NewTensor(tOrder, tDimSize); XTensor * t1 = NewTensor(tOrder1, tDimSize1);
XTensor * t2 = NewTensor(tOrder2, tDimSize2);
/* initialize variables */ /* initialize variables */
s->SetData(sData, sUnitNum); s->SetData(sData, sUnitNum);
t->SetZeroAll(); t1->SetZeroAll();
t2->SetZeroAll();
/* call merge function */ /* call merge function */
Merge(s, t, 1, 0); Merge(s, t1, 1, 0);
Merge(s, t2, 2, 0);
/* check results */ /* check results */
cpuTest = t->CheckData(answer, tUnitNum); cpuTest = t1->CheckData(answer1, tUnitNum1) && t2->CheckData(answer2, tUnitNum2);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -159,121 +182,50 @@ bool TestMerge2() ...@@ -159,121 +182,50 @@ bool TestMerge2()
/* create tensor */ /* create tensor */
XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0); XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0); XTensor * tGPU1 = NewTensor(tOrder1, tDimSize1, X_FLOAT, 1.0F, 0);
XTensor * tGPU2 = NewTensor(tOrder2, tDimSize2, X_FLOAT, 1.0F, 0);
/* Initialize variables */ /* Initialize variables */
sGPU->SetData(sData, sUnitNum); sGPU->SetData(sData, sUnitNum);
tGPU->SetZeroAll(); tGPU1->SetZeroAll();
tGPU2->SetZeroAll();
/* call merge function */
Merge(sGPU, tGPU, 1, 0);
/* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum);
/* destroy variables */
delete s, t, sGPU, tGPU;
delete[] sDimSize, tDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete s, t;
delete[] sDimSize, tDimSize;
return cpuTest;
#endif // USE_CUDA
}
/* case 3: transform a tensor by merging it along with a dimension.
* In this case, (2, 3, 4) -> (3, 8), whereToMerge=0, leadingDim=2.
*/
bool TestMerge3()
{
/* a source tensor of size (2, 3, 4) */
int sOrder = 3;
int * sDimSize = new int[sOrder];
sDimSize[0] = 2;
sDimSize[1] = 3;
sDimSize[2] = 4;
int sUnitNum = 1;
for (int i = 0; i < sOrder; i++)
sUnitNum *= sDimSize[i];
/* a target tensor of size (8, 3) */
int tOrder = 2;
int * tDimSize = new int[tOrder];
tDimSize[0] = 3;
tDimSize[1] = 8;
int tUnitNum = 1;
for (int i = 0; i < tOrder; i++)
tUnitNum *= tDimSize[i];
DTYPE sData[2][3][4] = { { {0.0, 1.0, 2.0, 3.0},
{4.0, 5.0, 6.0, 7.0},
{8.0, 9.0, 10.0, 11.0} },
{ {0.0, -1.0, -2.0, -3.0},
{-4.0, -5.0, -6.0, -7.0},
{-8.0, -9.0, -10.0, -11.0} } };
DTYPE answer[3][8] = { {0.0, 1.0, 2.0, 3.0, 0.0, -1.0, -2.0, -3.0},
{4.0, 5.0, 6.0, 7.0, -4.0, -5.0, -6.0, -7.0},
{8.0, 9.0, 10.0, 11.0, -8.0, -9.0, -10.0, -11.0} };
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * s = NewTensor(sOrder, sDimSize);
XTensor * t = NewTensor(tOrder, tDimSize);
/* initialize variables */
s->SetData(sData, sUnitNum);
t->SetZeroAll();
/* call merge function */ /* call merge function */
Merge(s, t, 2, 0); Merge(sGPU, tGPU1, 1, 0);
Merge(sGPU, tGPU2, 2, 0);
/* check results */ /* check results */
cpuTest = t->CheckData(answer, tUnitNum); gpuTest = tGPU1->CheckData(answer1, tUnitNum1) && tGPU2->CheckData(answer2, tUnitNum2);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensor */
XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
/* Initialize variables */
sGPU->SetData(sData, sUnitNum);
tGPU->SetZeroAll();
/* call merge function */
Merge(sGPU, tGPU, 2, 0);
/* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum);
/* destroy variables */ /* destroy variables */
delete s, t, sGPU, tGPU; delete s;
delete[] sDimSize, tDimSize; delete t1;
delete t2;
delete sGPU;
delete tGPU1;
delete tGPU2;
delete[] sDimSize;
delete[] tDimSize1;
delete[] tDimSize2;
return cpuTest && gpuTest; return cpuTest && gpuTest;
#else #else
/* destroy variables */ /* destroy variables */
delete s, t; delete s;
delete[] sDimSize, tDimSize; delete t1;
delete t2;
delete[] sDimSize;
delete[] tDimSize1;
delete[] tDimSize2;
return cpuTest; return cpuTest;
#endif // USE_CUDA #endif // USE_CUDA
} }
/* case 4: merge small tensors into a big tensor. /* case 3: merge small tensors into a big tensor.
In this case, 2 * (2, 4) -> (4, 4), whereToMerge=0. In this case, 2 * (2, 4) -> (4, 4), whereToMerge=0.
*/ */
bool TestMerge4() bool TestMerge3()
{ {
/* create list */ /* create list */
XList * smallList = new XList(); XList * smallList = new XList();
...@@ -358,24 +310,36 @@ bool TestMerge4() ...@@ -358,24 +310,36 @@ bool TestMerge4()
/* check results */ /* check results */
cpuTest = tGPU->CheckData(answer, tUnitNum); gpuTest = tGPU->CheckData(answer, tUnitNum);
delete s1, s2, t, sGPU1, sGPU2, tGPU; /* destroy variables */
delete[] sDimSize, tDimSize; delete s1;
delete s2;
delete t;
delete sGPU1;
delete sGPU2;
delete tGPU;
delete[] sDimSize;
delete[] tDimSize;
delete smallList; delete smallList;
return cpuTest && gpuTest; return cpuTest && gpuTest;
#else #else
/* destroy variables */ /* destroy variables */
delete s1, s2, t; delete s1;
delete[] sDimSize, tDimSize; delete s2;
delete t;
delete[] sDimSize;
delete[] tDimSize;
delete smallList;
return cpuTest; return cpuTest;
#endif // USE_CUDA #endif // USE_CUDA
} }
/* case 5: merge small tensors into a big tensor. /* case 4: merge small tensors into a big tensor.
In this case, 2 * (2, 4) -> (2, 8), whereToMerge=1. In this case, 2 * (2, 4) -> (2, 8), whereToMerge=1.
*/ */
bool TestMerge5() bool TestMerge4()
{ {
/* create list */ /* create list */
XList * smallList = new XList(); XList * smallList = new XList();
...@@ -458,15 +422,27 @@ bool TestMerge5() ...@@ -458,15 +422,27 @@ bool TestMerge5()
/* check results */ /* check results */
cpuTest = tGPU->CheckData(answer, tUnitNum); gpuTest = tGPU->CheckData(answer, tUnitNum);
delete s1, s2, t, sGPU1, sGPU2, tGPU; /* destroy variables */
delete[] sDimSize, tDimSize; delete s1;
delete s2;
delete t;
delete sGPU1;
delete sGPU2;
delete tGPU;
delete[] sDimSize;
delete[] tDimSize;
delete smallList; delete smallList;
return cpuTest && gpuTest; return cpuTest && gpuTest;
#else #else
/* destroy variables */ /* destroy variables */
delete s1, s2, t; delete s1;
delete[] sDimSize, tDimSize; delete s2;
delete t;
delete[] sDimSize;
delete[] tDimSize;
delete smallList;
return cpuTest; return cpuTest;
#endif // USE_CUDA #endif // USE_CUDA
...@@ -481,7 +457,7 @@ bool TestMerge5() ...@@ -481,7 +457,7 @@ bool TestMerge5()
extern "C" extern "C"
bool TestMerge() bool TestMerge()
{ {
XPRINT(0, stdout, "[TEST MERGE] -------------\n"); XPRINT(0, stdout, "[TEST MERGE] transform a tensor by merging it alone with a dimension or merge small tensors into a big tensor\n");
bool returnFlag = true, caseFlag = true; bool returnFlag = true, caseFlag = true;
/* case 1 test */ /* case 1 test */
...@@ -521,15 +497,6 @@ bool TestMerge() ...@@ -521,15 +497,6 @@ bool TestMerge()
else else
XPRINT(0, stdout, ">> case 4 passed!\n"); XPRINT(0, stdout, ">> case 4 passed!\n");
/* case 5 test */
caseFlag = TestMerge5();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 5 failed!\n");
}
else
XPRINT(0, stdout, ">> case 5 passed!\n");
/* other cases test */ /* other cases test */
/* /*
TODO!! TODO!!
......
...@@ -19,17 +19,16 @@ ...@@ -19,17 +19,16 @@
* $Created by: Lin Ye (email: linye2015@outlook.com) 2018-06-15 * $Created by: Lin Ye (email: linye2015@outlook.com) 2018-06-15
*/ */
#include "../XTensor.h" #include "TMultiplyElementWise.h"
#include "../XDevice.h"
#include "../core/MultiplyElementWise.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1: element-wise product of two tensors, c(i) = a(i)*b(i) + \alpha * c(i) /* case 1: element-wise product of two tensors
* In this case, (2 * 1) (2 * 1) -> (2 * 1), leadingDim=0, alpha=0. * c(i) = a(i)*b(i) + \alpha * c(i)
* In this case, (2, 1) (2, 1) -> (2, 1), leadingDim=0, alpha=0.
*/ */
bool TestMultiplyElementWise1() bool TestMultiplyElementWise1()
{ {
/* a source tensor of size 2 * 1 */ /* a source tensor of size (2, 1) */
int sOrder1 = 2; int sOrder1 = 2;
int * sDimSize1 = new int[sOrder1]; int * sDimSize1 = new int[sOrder1];
sDimSize1[0] = 2; sDimSize1[0] = 2;
...@@ -39,7 +38,7 @@ bool TestMultiplyElementWise1() ...@@ -39,7 +38,7 @@ bool TestMultiplyElementWise1()
for (int i = 0; i < sOrder1; i++) for (int i = 0; i < sOrder1; i++)
sUnitNum1 *= sDimSize1[i]; sUnitNum1 *= sDimSize1[i];
/* a source tensor of size 2 * 1 */ /* a source tensor of size (2, 1) */
int sOrder2 = 2; int sOrder2 = 2;
int * sDimSize2 = new int[sOrder2]; int * sDimSize2 = new int[sOrder2];
sDimSize2[0] = 2; sDimSize2[0] = 2;
...@@ -49,7 +48,7 @@ bool TestMultiplyElementWise1() ...@@ -49,7 +48,7 @@ bool TestMultiplyElementWise1()
for (int i = 0; i < sOrder2; i++) for (int i = 0; i < sOrder2; i++)
sUnitNum2 *= sDimSize2[i]; sUnitNum2 *= sDimSize2[i];
/* a target tensor of size 2 * 1 */ /* a target tensor of size (2, 1) */
int tOrder = 2; int tOrder = 2;
int * tDimSize = new int[tOrder]; int * tDimSize = new int[tOrder];
tDimSize[0] = 2; tDimSize[0] = 2;
...@@ -59,9 +58,12 @@ bool TestMultiplyElementWise1() ...@@ -59,9 +58,12 @@ bool TestMultiplyElementWise1()
for (int i = 0; i < tOrder; i++) for (int i = 0; i < tOrder; i++)
tUnitNum *= tDimSize[i]; tUnitNum *= tDimSize[i];
DTYPE sData1[2][1] = { {0.0}, {1.0} }; DTYPE sData1[2][1] = { {0.0},
DTYPE sData2[2][1] = { {2.0}, {3.0} }; {1.0} };
DTYPE answer[2][1] = { {0.0}, {3.0} }; DTYPE sData2[2][1] = { {2.0},
{3.0} };
DTYPE answer[2][1] = { {0.0},
{3.0} };
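/* with alpha=0 the result is the plain element-wise product:
   0*2 = 0 and 1*3 = 3 */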
/* CPU test */ /* CPU test */
bool cpuTest = true; bool cpuTest = true;
...@@ -76,7 +78,7 @@ bool TestMultiplyElementWise1() ...@@ -76,7 +78,7 @@ bool TestMultiplyElementWise1()
s2->SetData(sData2, sUnitNum2); s2->SetData(sData2, sUnitNum2);
t->SetZeroAll(); t->SetZeroAll();
/* call multiplyelementwise function */ /* call MultiplyElementWise function */
MultiplyElementWise(s1, s2, t, 0); MultiplyElementWise(s1, s2, t, 0);
/* check results */ /* check results */
...@@ -96,32 +98,44 @@ bool TestMultiplyElementWise1() ...@@ -96,32 +98,44 @@ bool TestMultiplyElementWise1()
sGPU2->SetData(sData2, sUnitNum2); sGPU2->SetData(sData2, sUnitNum2);
tGPU->SetZeroAll(); tGPU->SetZeroAll();
/* call multiplyelementwise function */ /* call MultiplyElementWise function */
MultiplyElementWise(sGPU1, sGPU2, tGPU, 0); MultiplyElementWise(sGPU1, sGPU2, tGPU, 0);
/* check results */ /* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum); gpuTest = tGPU->CheckData(answer, tUnitNum);
/* destroy variables */ /* destroy variables */
delete s1, s2, t, sGPU1, sGPU2, tGPU; delete s1;
delete[] sDimSize1, sDimSize2, tDimSize; delete s2;
delete t;
delete sGPU1;
delete sGPU2;
delete tGPU;
delete[] sDimSize1;
delete[] sDimSize2;
delete[] tDimSize;
return cpuTest && gpuTest; return cpuTest && gpuTest;
#else #else
/* destroy variables */ /* destroy variables */
delete s1, s2, t; delete s1;
delete[] sDimSize1, sDimSize2, tDimSize; delete s2;
delete t;
delete[] sDimSize1;
delete[] sDimSize2;
delete[] tDimSize;
return cpuTest; return cpuTest;
#endif // USE_CUDA #endif // USE_CUDA
} }
/* case 2: element-wise product of two tensors, c(i) = a(i)*b(i) + \alpha * c(i) /* case 2: element-wise product of two tensors
* In this case, (2 * 2) (2 * 2) -> (2 * 2), leadingDim=0, alpha=0. * c(i) = a(i)*b(i) + \alpha * c(i)
* In this case, (2, 2) (2, 2) -> (2, 2), leadingDim=0, alpha=0.
*/ */
bool TestMultiplyElementWise2() bool TestMultiplyElementWise2()
{ {
/* a source tensor of size (2 * 2) */ /* a source tensor of size (2, 2) */
int sOrder1 = 2; int sOrder1 = 2;
int * sDimSize1 = new int[sOrder1]; int * sDimSize1 = new int[sOrder1];
sDimSize1[0] = 2; sDimSize1[0] = 2;
...@@ -131,7 +145,7 @@ bool TestMultiplyElementWise2() ...@@ -131,7 +145,7 @@ bool TestMultiplyElementWise2()
for (int i = 0; i < sOrder1; i++) for (int i = 0; i < sOrder1; i++)
sUnitNum1 *= sDimSize1[i]; sUnitNum1 *= sDimSize1[i];
/* a source tensor of size (2 * 2) */ /* a source tensor of size (2, 2) */
int sOrder2 = 2; int sOrder2 = 2;
int * sDimSize2 = new int[sOrder2]; int * sDimSize2 = new int[sOrder2];
sDimSize2[0] = 2; sDimSize2[0] = 2;
...@@ -141,7 +155,7 @@ bool TestMultiplyElementWise2() ...@@ -141,7 +155,7 @@ bool TestMultiplyElementWise2()
for (int i = 0; i < sOrder2; i++) for (int i = 0; i < sOrder2; i++)
sUnitNum2 *= sDimSize2[i]; sUnitNum2 *= sDimSize2[i];
/* a target tensor of size (2 * 2) */ /* a target tensor of size (2, 2) */
int tOrder = 2; int tOrder = 2;
int * tDimSize = new int[tOrder]; int * tDimSize = new int[tOrder];
tDimSize[0] = 2; tDimSize[0] = 2;
...@@ -171,7 +185,7 @@ bool TestMultiplyElementWise2() ...@@ -171,7 +185,7 @@ bool TestMultiplyElementWise2()
s2->SetData(sData2, sUnitNum2); s2->SetData(sData2, sUnitNum2);
t->SetZeroAll(); t->SetZeroAll();
/* call multiplyelementwise function */ /* call MultiplyElementWise function */
MultiplyElementWise(s1, s2, t, 0); MultiplyElementWise(s1, s2, t, 0);
/* check results */ /* check results */
...@@ -191,32 +205,43 @@ bool TestMultiplyElementWise2() ...@@ -191,32 +205,43 @@ bool TestMultiplyElementWise2()
sGPU2->SetData(sData2, sUnitNum2); sGPU2->SetData(sData2, sUnitNum2);
tGPU->SetZeroAll(); tGPU->SetZeroAll();
/* call multiplyelementwise function */ /* call MultiplyElementWise function */
MultiplyElementWise(sGPU1, sGPU2, tGPU, 0); MultiplyElementWise(sGPU1, sGPU2, tGPU, 0);
/* check results */ /* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum); gpuTest = tGPU->CheckData(answer, tUnitNum);
/* destroy variables */ /* destroy variables */
delete s1, s2, t, sGPU1, sGPU2, tGPU; delete s1;
delete[] sDimSize1, sDimSize2, tDimSize; delete s2;
delete t;
delete sGPU1;
delete sGPU2;
delete tGPU;
delete[] sDimSize1;
delete[] sDimSize2;
delete[] tDimSize;
return cpuTest && gpuTest; return cpuTest && gpuTest;
#else #else
/* destroy variables */ /* destroy variables */
delete s1, s2, t; delete s1;
delete[] sDimSize1, sDimSize2, tDimSize; delete s2;
delete t;
delete[] sDimSize1;
delete[] sDimSize2;
delete[] tDimSize;
return cpuTest; return cpuTest;
#endif // USE_CUDA #endif // USE_CUDA
} }
/* case 3: element-wise product of two tensors, c(i) = a(i)*b(i) + \alpha * c(i) /* case 3: element-wise product of two tensors, c(i) = a(i)*b(i) + \alpha * c(i)
* In this case, (2 * 2) (2 * 2) -> (2 * 2), leadingDim=1, alpha=0. * In this case, (2, 2) (2, 2) -> (2, 2), leadingDim=1, alpha=0.
*/ */
bool TestMultiplyElementWise3() bool TestMultiplyElementWise3()
{ {
/* a source tensor of size (2 * 2) */ /* a source tensor of size (2, 2) */
int sOrder1 = 2; int sOrder1 = 2;
int * sDimSize1 = new int[sOrder1]; int * sDimSize1 = new int[sOrder1];
sDimSize1[0] = 2; sDimSize1[0] = 2;
...@@ -226,7 +251,7 @@ bool TestMultiplyElementWise3() ...@@ -226,7 +251,7 @@ bool TestMultiplyElementWise3()
for (int i = 0; i < sOrder1; i++) for (int i = 0; i < sOrder1; i++)
sUnitNum1 *= sDimSize1[i]; sUnitNum1 *= sDimSize1[i];
/* a source tensor of size (2 * 2) */ /* a source tensor of size (2, 2) */
int sOrder2 = 2; int sOrder2 = 2;
int * sDimSize2 = new int[sOrder2]; int * sDimSize2 = new int[sOrder2];
sDimSize2[0] = 2; sDimSize2[0] = 2;
...@@ -236,7 +261,7 @@ bool TestMultiplyElementWise3() ...@@ -236,7 +261,7 @@ bool TestMultiplyElementWise3()
for (int i = 0; i < sOrder2; i++) for (int i = 0; i < sOrder2; i++)
sUnitNum2 *= sDimSize2[i]; sUnitNum2 *= sDimSize2[i];
/* a target tensor of size (2 * 2) */ /* a target tensor of size (2, 2) */
int tOrder = 2; int tOrder = 2;
int * tDimSize = new int[tOrder]; int * tDimSize = new int[tOrder];
tDimSize[0] = 2; tDimSize[0] = 2;
...@@ -266,7 +291,7 @@ bool TestMultiplyElementWise3() ...@@ -266,7 +291,7 @@ bool TestMultiplyElementWise3()
s2->SetData(sData2, sUnitNum2); s2->SetData(sData2, sUnitNum2);
t->SetZeroAll(); t->SetZeroAll();
/* call multiplyelementwise function */ /* call MultiplyElementWise function */
MultiplyElementWise(s1, s2, t, 1); MultiplyElementWise(s1, s2, t, 1);
/* check results */ /* check results */
...@@ -286,21 +311,32 @@ bool TestMultiplyElementWise3() ...@@ -286,21 +311,32 @@ bool TestMultiplyElementWise3()
sGPU2->SetData(sData2, sUnitNum2); sGPU2->SetData(sData2, sUnitNum2);
tGPU->SetZeroAll(); tGPU->SetZeroAll();
/* call multiplyelementwise function */ /* call MultiplyElementWise function */
MultiplyElementWise(sGPU1, sGPU2, tGPU, 1); MultiplyElementWise(sGPU1, sGPU2, tGPU, 1);
/* check results */ /* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum); gpuTest = tGPU->CheckData(answer, tUnitNum);
/* destroy variables */ /* destroy variables */
delete s1, s2, t, sGPU1, sGPU2, tGPU; delete s1;
delete[] sDimSize1, sDimSize2, tDimSize; delete s2;
delete t;
delete sGPU1;
delete sGPU2;
delete tGPU;
delete[] sDimSize1;
delete[] sDimSize2;
delete[] tDimSize;
return cpuTest && gpuTest; return cpuTest && gpuTest;
#else #else
/* destroy variables */ /* destroy variables */
delete s1, s2, t; delete s1;
delete[] sDimSize1, sDimSize2, tDimSize; delete s2;
delete t;
delete[] sDimSize1;
delete[] sDimSize2;
delete[] tDimSize;
return cpuTest; return cpuTest;
#endif // USE_CUDA #endif // USE_CUDA
...@@ -315,7 +351,7 @@ TODO!! ...@@ -315,7 +351,7 @@ TODO!!
extern "C" extern "C"
bool TestMultiplyElementWise() bool TestMultiplyElementWise()
{ {
XPRINT(0, stdout, "[TEST MULTIPLYELEMENTWISE] -------------\n"); XPRINT(0, stdout, "[TEST MULTIPLYELEMENTWISE] element-wise product of two tensors \n");
bool returnFlag = true, caseFlag = true; bool returnFlag = true, caseFlag = true;
/* case 1 test */ /* case 1 test */
......
...@@ -19,15 +19,13 @@ ...@@ -19,15 +19,13 @@
* $Created by: Lin Ye (email: linye2015@outlook.com) 2018-06-14 * $Created by: Lin Ye (email: linye2015@outlook.com) 2018-06-14
*/ */
#include "../XTensor.h" #include "TNegate.h"
#include "../XDevice.h"
#include "../core/Negate.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1: set every entry to its minus value */ /* case 1: set every entry to its minus value */
bool TestNegate1() bool TestNegate1()
{ {
/* a tensor of size 3 * 2 */ /* a tensor of size (3, 2) */
int aOrder = 2; int aOrder = 2;
int * aDimSize = new int[aOrder]; int * aDimSize = new int[aOrder];
aDimSize[0] = 3; aDimSize[0] = 3;
...@@ -53,12 +51,12 @@ bool TestNegate1() ...@@ -53,12 +51,12 @@ bool TestNegate1()
/* initialize variables */ /* initialize variables */
a->SetData(aData, aUnitNum); a->SetData(aData, aUnitNum);
/* call negate function */ /* call Negate function */
Negate(a); Negate(a);
/* check results */ /* check results */
cpuTest = a->CheckData(answer, aUnitNum); cpuTest = a->CheckData(answer, aUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
bool gpuTest = true; bool gpuTest = true;
...@@ -69,14 +67,15 @@ bool TestNegate1() ...@@ -69,14 +67,15 @@ bool TestNegate1()
/* Initialize variables */ /* Initialize variables */
aGPU->SetData(aData, aUnitNum); aGPU->SetData(aData, aUnitNum);
/* call negate function */ /* call Negate function */
Negate(aGPU); Negate(aGPU);
/* check results */ /* check results */
gpuTest = aGPU->CheckData(answer, aUnitNum); gpuTest = aGPU->CheckData(answer, aUnitNum);
/* destroy variables */ /* destroy variables */
delete a, aGPU; delete a;
delete aGPU;
delete[] aDimSize; delete[] aDimSize;
return cpuTest && gpuTest; return cpuTest && gpuTest;
...@@ -92,7 +91,7 @@ bool TestNegate1() ...@@ -92,7 +91,7 @@ bool TestNegate1()
/* case 2: set every entry to its minus value */ /* case 2: set every entry to its minus value */
bool TestNegate2() bool TestNegate2()
{ {
/* a tensor of size 3 * 2 */ /* a tensor of size (3, 2) */
int aOrder = 2; int aOrder = 2;
int * aDimSize = new int[aOrder]; int * aDimSize = new int[aOrder];
aDimSize[0] = 3; aDimSize[0] = 3;
...@@ -118,7 +117,7 @@ bool TestNegate2() ...@@ -118,7 +117,7 @@ bool TestNegate2()
/* initialize variables */ /* initialize variables */
a->SetData(aData, aUnitNum); a->SetData(aData, aUnitNum);
/* call negate function */ /* call Negate function */
Negate(a); Negate(a);
/* check results */ /* check results */
...@@ -134,14 +133,15 @@ bool TestNegate2() ...@@ -134,14 +133,15 @@ bool TestNegate2()
/* Initialize variables */ /* Initialize variables */
aGPU->SetData(aData, aUnitNum); aGPU->SetData(aData, aUnitNum);
/* call negate function */ /* call Negate function */
Negate(aGPU); Negate(aGPU);
/* check results */ /* check results */
gpuTest = aGPU->CheckData(answer, aUnitNum); gpuTest = aGPU->CheckData(answer, aUnitNum);
/* destroy variables */ /* destroy variables */
delete a, aGPU; delete a;
delete aGPU;
delete[] aDimSize; delete[] aDimSize;
return cpuTest && gpuTest; return cpuTest && gpuTest;
...@@ -163,7 +163,7 @@ TODO!! ...@@ -163,7 +163,7 @@ TODO!!
extern "C" extern "C"
bool TestNegate() bool TestNegate()
{ {
XPRINT(0, stdout, "[TEST NEGATE] -------------\n"); XPRINT(0, stdout, "[TEST NEGATE] set every entry to its minus value \n");
bool returnFlag = true, caseFlag = true; bool returnFlag = true, caseFlag = true;
/* case 1 test */ /* case 1 test */
......
...@@ -19,17 +19,17 @@ ...@@ -19,17 +19,17 @@
* $Created by: Lin Ye (email: linye2015@outlook.com) 2018-06-20 * $Created by: Lin Ye (email: linye2015@outlook.com) 2018-06-20
*/ */
#include "../XTensor.h" #include "TNormalize.h"
#include "../XDevice.h"
#include "../core/Normalize.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1: normalized the data with normal distribution /* case 1: normalize the data with normal distribution
* In this case, dim=0. * For an input x, y = a * (x - mean) / sqrt(variance + \epsilon) + b,
* where a and b are the scalar and bias respectively,
* and \epsilon is the adjustment parameter.
*/ */
bool TestNormalize1() bool TestNormalize1()
{ {
/* a source tensor of size 2 * 3 */ /* a source tensor of size (2, 3) */
int sOrder = 2; int sOrder = 2;
int * sDimSize = new int[sOrder]; int * sDimSize = new int[sOrder];
sDimSize[0] = 2; sDimSize[0] = 2;
...@@ -39,7 +39,7 @@ bool TestNormalize1() ...@@ -39,7 +39,7 @@ bool TestNormalize1()
for (int i = 0; i < sOrder; i++) for (int i = 0; i < sOrder; i++)
sUnitNum *= sDimSize[i]; sUnitNum *= sDimSize[i];
/* a target tensor of size 2 * 3 */ /* a target tensor of size (2, 3) */
int tOrder = 2; int tOrder = 2;
int * tDimSize = new int[tOrder]; int * tDimSize = new int[tOrder];
tDimSize[0] = 2; tDimSize[0] = 2;
...@@ -49,7 +49,7 @@ bool TestNormalize1() ...@@ -49,7 +49,7 @@ bool TestNormalize1()
for (int i = 0; i < tOrder; i++) for (int i = 0; i < tOrder; i++)
tUnitNum *= tDimSize[i]; tUnitNum *= tDimSize[i];
/* a mean tensor of size 3 */ /* a mean tensor of size (3) */
int meanOrder = 1; int meanOrder = 1;
int * meanDimSize = new int[meanOrder]; int * meanDimSize = new int[meanOrder];
meanDimSize[0] = 3; meanDimSize[0] = 3;
...@@ -58,7 +58,7 @@ bool TestNormalize1() ...@@ -58,7 +58,7 @@ bool TestNormalize1()
for (int i = 0; i < meanOrder; i++) for (int i = 0; i < meanOrder; i++)
meanUnitNum *= meanDimSize[i]; meanUnitNum *= meanDimSize[i];
/* a var tensor of size 3 */ /* a variance tensor of size (3) */
int varOrder = 1; int varOrder = 1;
int * varDimSize = new int[varOrder]; int * varDimSize = new int[varOrder];
varDimSize[0] = 3; varDimSize[0] = 3;
...@@ -67,7 +67,7 @@ bool TestNormalize1() ...@@ -67,7 +67,7 @@ bool TestNormalize1()
for (int i = 0; i < varOrder; i++) for (int i = 0; i < varOrder; i++)
varUnitNum *= varDimSize[i]; varUnitNum *= varDimSize[i];
/* a a tensor of size 2 * 3 */ /* a scalar tensor of size (2, 3) */
int aOrder = 2; int aOrder = 2;
int * aDimSize = new int[aOrder]; int * aDimSize = new int[aOrder];
aDimSize[0] = 2; aDimSize[0] = 2;
...@@ -77,7 +77,7 @@ bool TestNormalize1() ...@@ -77,7 +77,7 @@ bool TestNormalize1()
for (int i = 0; i < aOrder; i++) for (int i = 0; i < aOrder; i++)
aUnitNum *= aDimSize[i]; aUnitNum *= aDimSize[i];
/* a b tensor of size 2 * 3 */ /* a bias tensor of size (2, 3) */
int bOrder = 2; int bOrder = 2;
int * bDimSize = new int[bOrder]; int * bDimSize = new int[bOrder];
bDimSize[0] = 2; bDimSize[0] = 2;
...@@ -87,41 +87,39 @@ bool TestNormalize1() ...@@ -87,41 +87,39 @@ bool TestNormalize1()
for (int i = 0; i < bOrder; i++) for (int i = 0; i < bOrder; i++)
bUnitNum *= bDimSize[i]; bUnitNum *= bDimSize[i];
DTYPE sData[2][3] = { {0.5, -1.0, 2.0}, DTYPE sData[2][3] = { {1.0, 2.0, 3.0},
{3.5, -4.5, 1.0} }; {1.5, 2.5, 3.5} };
DTYPE meanData[3] = {2.0, -2.75, 1.5}; DTYPE meanData[3] = {1.0, 1.5, 2.0};
DTYPE varData[3] = {4.5, 6.125, 0.5}; DTYPE varData[3] = {1.0, 1.0, 4.0};
DTYPE aData[2][3] = { {0.0, 0.0, 0.0}, DTYPE aData[2][3] = { {1.0, 1.0, 1.0},
{0.0, 0.0, 0.0} }; {1.0, 1.0, 1.0} };
DTYPE bData[2][3] = { {0.0, 0.0, 0.0}, DTYPE answer[2][3] = { {0.0, 0.5, 0.5},
{0.0, 0.0, 0.0} }; {0.5, 1.0, 0.75} };
DTYPE answer[2][3] = { {0.0, 0.0, 0.0},
{0.0, 0.0, 0.0} };
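/* e.g. t[1][2] = 1.0 * (3.5 - 2.0) / sqrt(4.0 + 0.0) + 0.0 = 0.75 */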
/* CPU test */ /* CPU test */
bool cpuTest = true; bool cpuTest = true;
/* create tensors */ /* create tensors */
XTensor * s = NewTensor(sOrder, sDimSize); XTensor * s = NewTensor(sOrder, sDimSize);
XTensor * t = NewTensor(tOrder, tDimSize);
XTensor * mean = NewTensor(meanOrder, meanDimSize); XTensor * mean = NewTensor(meanOrder, meanDimSize);
XTensor * var = NewTensor(varOrder, varDimSize); XTensor * var = NewTensor(varOrder, varDimSize);
XTensor * a = NewTensor(aOrder, aDimSize); XTensor * a = NewTensor(aOrder, aDimSize);
XTensor * b = NewTensor(bOrder, bDimSize); XTensor * b = NewTensor(bOrder, bDimSize);
XTensor * t = NewTensor(tOrder, tDimSize);
/* initialize variables */ /* initialize variables */
s->SetData(sData, sUnitNum); s->SetData(sData, sUnitNum);
mean->SetData(meanData, meanUnitNum); mean->SetData(meanData, meanUnitNum);
var->SetData(varData, varUnitNum); var->SetData(varData, varUnitNum);
a->SetData(aData, aUnitNum); a->SetData(aData, aUnitNum);
b->SetData(bData, bUnitNum); b->SetZeroAll();
t->SetZeroAll(); t->SetZeroAll();
/* call normalize function */ /* call normalize function */
Normalize(s, t, 0, mean, var, a, b, 0.0); Normalize(s, t, 0, mean, var, a, b, 0.0);
/* check results */ /* check results */
cpuTest = t->CheckData(answer, tUnitNum); cpuTest = t->CheckData(answer, tUnitNum, 1e-4, 0);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -140,24 +138,50 @@ bool TestNormalize1() ...@@ -140,24 +138,50 @@ bool TestNormalize1()
meanGPU->SetData(meanData, meanUnitNum); meanGPU->SetData(meanData, meanUnitNum);
varGPU->SetData(varData, varUnitNum); varGPU->SetData(varData, varUnitNum);
aGPU->SetData(aData, aUnitNum); aGPU->SetData(aData, aUnitNum);
bGPU->SetData(bData, bUnitNum); bGPU->SetZeroAll();
tGPU->SetZeroAll(); tGPU->SetZeroAll();
/* call normalize function */ /* call Normalize function */
Normalize(sGPU, tGPU, 0, meanGPU, varGPU, aGPU, bGPU, 0.0); Normalize(sGPU, tGPU, 0, meanGPU, varGPU, aGPU, bGPU, 0.0);
/* check results */ /* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum); gpuTest = tGPU->CheckData(answer, tUnitNum, 1e-4, 0);
/* destroy variables */ /* destroy variables */
delete s, t, mean, var, a, b, sGPU, tGPU, meanGPU, varGPU, aGPU, bGPU; delete s;
delete[] sDimSize, tDimSize, meanDimSize, varDimSize, aDimSize, bDimSize; delete t;
delete mean;
delete var;
delete a;
delete b;
delete sGPU;
delete tGPU;
delete meanGPU;
delete varGPU;
delete aGPU;
delete bGPU;
delete[] sDimSize;
delete[] tDimSize;
delete[] meanDimSize;
delete[] varDimSize;
delete[] aDimSize;
delete[] bDimSize;
return cpuTest && gpuTest; return cpuTest && gpuTest;
#else #else
/* destroy variables */ /* destroy variables */
delete s, t, mean, var, a, b; delete s;
delete[] sDimSize, tDimSize, meanDimSize, varDimSize, aDimSize, bDimSize; delete t;
delete mean;
delete var;
delete a;
delete b;
delete[] sDimSize;
delete[] tDimSize;
delete[] meanDimSize;
delete[] varDimSize;
delete[] aDimSize;
delete[] bDimSize;
return cpuTest; return cpuTest;
#endif // USE_CUDA #endif // USE_CUDA
...@@ -172,7 +196,7 @@ TODO!! ...@@ -172,7 +196,7 @@ TODO!!
extern "C" extern "C"
bool TestNormalize() bool TestNormalize()
{ {
XPRINT(0, stdout, "[TEST NORMALIZE] -------------\n"); XPRINT(0, stdout, "[TEST NORMALIZE] normalized the data with normal distribution \n");
bool returnFlag = true, caseFlag = true; bool returnFlag = true, caseFlag = true;
/* case 1 test */ /* case 1 test */
......
...@@ -19,9 +19,8 @@ ...@@ -19,9 +19,8 @@
* $Created by: Lin Ye (email: linye2015@outlook.com) 2018-06-15 * $Created by: Lin Ye (email: linye2015@outlook.com) 2018-06-15
*/ */
#include "../XTensor.h" #include "../XUtility.h"
#include "../XDevice.h" #include "TPower.h"
#include "../core/Power.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1: get the power(a, p) /* case 1: get the power(a, p)
...@@ -29,7 +28,7 @@ namespace nts { // namespace nts(NiuTrans.Tensor) ...@@ -29,7 +28,7 @@ namespace nts { // namespace nts(NiuTrans.Tensor)
*/ */
bool TestPower1() bool TestPower1()
{ {
/* a tensor of size 3 * 2 */ /* a tensor of size (3, 2) */
int aOrder = 2; int aOrder = 2;
int * aDimSize = new int[aOrder]; int * aDimSize = new int[aOrder];
aDimSize[0] = 3; aDimSize[0] = 3;
...@@ -55,11 +54,11 @@ bool TestPower1() ...@@ -55,11 +54,11 @@ bool TestPower1()
/* initialize variables */ /* initialize variables */
a->SetData(aData, aUnitNum); a->SetData(aData, aUnitNum);
/* call power function */ /* call Power function */
Power(a, 2.0); Power(a, 2.0);
/* check results */ /* check results */
cpuTest = a->CheckData(answer, aUnitNum); cpuTest = a->CheckData(answer, aUnitNum, 1e-4F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -75,10 +74,11 @@ bool TestPower1() ...@@ -75,10 +74,11 @@ bool TestPower1()
Power(aGPU, 2.0); Power(aGPU, 2.0);
/* check results */ /* check results */
gpuTest = aGPU->CheckData(answer, aUnitNum, 0.0001F); gpuTest = aGPU->CheckData(answer, aUnitNum, 1e-4F);
/* destroy variables */ /* destroy variables */
delete a, aGPU; delete a;
delete aGPU;
delete[] aDimSize; delete[] aDimSize;
return cpuTest && gpuTest; return cpuTest && gpuTest;
...@@ -96,7 +96,7 @@ bool TestPower1() ...@@ -96,7 +96,7 @@ bool TestPower1()
*/ */
bool TestPower2() bool TestPower2()
{ {
/* a tensor of size 3 * 2 */ /* a tensor of size (3, 2) */
int aOrder = 2; int aOrder = 2;
int * aDimSize = new int[aOrder]; int * aDimSize = new int[aOrder];
aDimSize[0] = 3; aDimSize[0] = 3;
...@@ -122,11 +122,11 @@ bool TestPower2() ...@@ -122,11 +122,11 @@ bool TestPower2()
/* initialize variables */ /* initialize variables */
a->SetData(aData, aUnitNum); a->SetData(aData, aUnitNum);
/* call power function */ /* call Power function */
Power(a, 1.0); Power(a, 1.0);
/* check results */ /* check results */
cpuTest = a->CheckData(answer, aUnitNum); cpuTest = a->CheckData(answer, aUnitNum, 1e-4F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -138,14 +138,15 @@ bool TestPower2() ...@@ -138,14 +138,15 @@ bool TestPower2()
/* Initialize variables */ /* Initialize variables */
aGPU->SetData(aData, aUnitNum); aGPU->SetData(aData, aUnitNum);
/* call power function */ /* call Power function */
Power(aGPU, 1.0); Power(aGPU, 1.0);
/* check results */ /* check results */
gpuTest = aGPU->CheckData(answer, aUnitNum); gpuTest = aGPU->CheckData(answer, aUnitNum, 1e-4F);
/* destroy variables */ /* destroy variables */
delete a, aGPU; delete a;
delete aGPU;
delete[] aDimSize; delete[] aDimSize;
return cpuTest && gpuTest; return cpuTest && gpuTest;
...@@ -163,7 +164,7 @@ bool TestPower2() ...@@ -163,7 +164,7 @@ bool TestPower2()
*/ */
bool TestPower3() bool TestPower3()
{ {
/* a tensor of size 3 * 2 */ /* a tensor of size (3, 2) */
int aOrder = 2; int aOrder = 2;
int * aDimSize = new int[aOrder]; int * aDimSize = new int[aOrder];
aDimSize[0] = 3; aDimSize[0] = 3;
...@@ -189,11 +190,11 @@ bool TestPower3() ...@@ -189,11 +190,11 @@ bool TestPower3()
/* initialize variables */ /* initialize variables */
a->SetData(aData, aUnitNum); a->SetData(aData, aUnitNum);
/* call power function */ /* call Power function */
Power(a, 0.0); Power(a, 0.0);
/* check results */ /* check results */
cpuTest = a->CheckData(answer, aUnitNum); cpuTest = a->CheckData(answer, aUnitNum, 1e-4F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -205,14 +206,15 @@ bool TestPower3() ...@@ -205,14 +206,15 @@ bool TestPower3()
/* Initialize variables */ /* Initialize variables */
aGPU->SetData(aData, aUnitNum); aGPU->SetData(aData, aUnitNum);
/* call power function */ /* call Power function */
Power(aGPU, 0.0); Power(aGPU, 0.0);
/* check results */ /* check results */
gpuTest = aGPU->CheckData(answer, aUnitNum); gpuTest = aGPU->CheckData(answer, aUnitNum, 1e-4F);
/* destroy variables */ /* destroy variables */
delete a, aGPU; delete a;
delete aGPU;
delete[] aDimSize; delete[] aDimSize;
return cpuTest && gpuTest; return cpuTest && gpuTest;
...@@ -234,7 +236,7 @@ TODO!! ...@@ -234,7 +236,7 @@ TODO!!
extern "C" extern "C"
bool TestPower() bool TestPower()
{ {
XPRINT(0, stdout, "[TEST POWER] -------------\n"); XPRINT(0, stdout, "[TEST POWER] get the power(a, p) \n");
bool returnFlag = true, caseFlag = true; bool returnFlag = true, caseFlag = true;
/* case 1 test */ /* case 1 test */
......
...@@ -16,7 +16,7 @@ ...@@ -16,7 +16,7 @@
*/ */
/* /*
* $Created by: LI Yinqiao (email: li.yin.qiao.2012@hotmail.com) 2018-04-30 * $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-30
*/ */
#ifndef __TEST_REDUCEMAX_H__ #ifndef __TEST_REDUCEMAX_H__
...@@ -24,12 +24,11 @@ ...@@ -24,12 +24,11 @@
#include "../core/ReduceMax.h" #include "../core/ReduceMax.h"
namespace nts { // namespace nt(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
/* test for ReduceMax Function */ /* test for ReduceMax Function */
extern "C" extern "C"
bool TestReduceMax(); bool TestReduceMax();
} // namespace nt(NiuTrans.Tensor) } // namespace nts(NiuTrans.Tensor)
#endif // __TEST_REDUCEMAX_H__ #endif // __TEST_REDUCEMAX_H__
...@@ -19,106 +19,111 @@ ...@@ -19,106 +19,111 @@
* $Created by: LI Yinqiao (email: li.yin.qiao.2012@hotmail.com) 2018-04-30 * $Created by: LI Yinqiao (email: li.yin.qiao.2012@hotmail.com) 2018-04-30
*/ */
#include "../XTensor.h" #include "TReduceSum.h"
#include "../XDevice.h"
#include "../core/ReduceMean.h" namespace nts { // namespace nts(NiuTrans.Tensor)
#include "../core/ReduceMax.h" /* case 1: sum the items along a dimension of the tensor.
#include "../core/ReduceSum.h" * In this case,
(2, 4) -> (4), dim = 0
namespace nts { // namespace nt(NiuTrans.Tensor) (2, 4) -> (2), dim = 1
/* case 1 */ */
bool TestReduceSum1() bool TestReduceSum1()
{ {
/* a tensor of size 2 * 4 */ /* a tensor of size (2, 4) */
int order = 2; int sOrder = 2;
int order_reduce = 1; int * sDimSize = new int[sOrder];
int * dimSize = new int[order]; sDimSize[0] = 2;
dimSize[0] = 2; sDimSize[1] = 4;
dimSize[1] = 4;
int sUnitNum = 1;
int unitNum = 1; for (int i = 0; i < sOrder; i++)
for (int i = 0; i < order; i++) sUnitNum *= sDimSize[i];
unitNum *= dimSize[i];
/* a tensor of size 4 */ /* a tensor of size (4) */
int * dimSize_reduce_a = new int[order_reduce]; int tOrder1 = 1;
dimSize_reduce_a[0] = 4; int * tDimSize1 = new int[tOrder1];
tDimSize1[0] = 4;
int unitNum_a = 1;
for (int i = 0; i < order_reduce; i++) int tUnitNum1 = 1;
unitNum_a *= dimSize_reduce_a[i]; for (int i = 0; i < tOrder1; i++)
/* a tensor of size 2 */ tUnitNum1 *= tDimSize1[i];
int * dimSize_reduce_b = new int[order_reduce];
dimSize_reduce_b[0] = 2; /* a tensor of size (2) */
int tOrder2 = 1;
int unitNum_b = 1; int * tDimSize2 = new int[tOrder2];
for (int i = 0; i < order_reduce; i++) tDimSize2[0] = 2;
unitNum_b *= dimSize_reduce_b[i];
int tUnitNum2 = 1;
DTYPE aData[2][4] = { { 0.0, 1.0, 2.0, 3.0 }, for (int i = 0; i < tOrder2; i++)
{ 4.0, 5.0, 6.0, 7.0 } }; tUnitNum2 *= tDimSize2[i];
DTYPE bData[2][4] = { { 1.0, -1.0, -3.0, -5.0 },
{ -7.0, -9.0, -11.0, -13.0 } }; DTYPE sData[2][4] = { {0.0, 1.0, 2.0, 3.0},
DTYPE answer_a[4] = { 4.0, 6.0, 8.0, 10.0 }; {4.0, 5.0, 6.0, 7.0} };
DTYPE answer_b[2] = { -8.0, -40.0 }; DTYPE answer1[4] = {4.0, 6.0, 8.0, 10.0};
DTYPE answer2[2] = {6.0, 22.0};
/* CPU test */ /* CPU test */
bool cpuTest = true; bool cpuTest = true;
/* create tensors */ /* create tensors */
XTensor * a = NewTensor(order, dimSize); XTensor * s = NewTensor(sOrder, sDimSize);
XTensor * reduce_a = NewTensor(order_reduce, dimSize_reduce_a); XTensor * t1 = NewTensor(tOrder1, tDimSize1);
XTensor * b = NewTensor(order, dimSize); XTensor * t2 = NewTensor(tOrder2, tDimSize2);
XTensor * reduce_b = NewTensor(order_reduce, dimSize_reduce_b);
/* initialize variables */ /* initialize variables */
a->SetData(aData, unitNum); s->SetData(sData, sUnitNum);
b->SetData(bData, unitNum); t1->SetZeroAll();
t2->SetZeroAll();
/* call reduce sum function */ /* call ReduceSum function */
ReduceSum(a, reduce_a, 0); ReduceSum(s, t1, 0);
ReduceSum(b, reduce_b, 1); ReduceSum(s, t2, 1);
//DTYPE* reduce_a_data = (DTYPE*)reduce_a->data;
//for (int i = 0; i < unitNum_a; i++)
// printf("%f ", *reduce_a_data++);
//printf("\n");
//DTYPE* reduce_b_data = (DTYPE*)reduce_b->data;
//for (int i = 0; i < unitNum_b; i++)
// printf("%f ", *reduce_b_data++);
/* check results */ /* check results */
cpuTest = reduce_a->CheckData(answer_a, unitNum_a) && reduce_b->CheckData(answer_b, unitNum_b); cpuTest = t1->CheckData(answer1, tUnitNum1) && t2->CheckData(answer2, tUnitNum2);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
bool gpuTest = true; bool gpuTest = true;
/* create tensor */ /* create tensors */
XTensor * aGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0); XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * reduce_aGPU = NewTensor(order_reduce, dimSize_reduce_a, X_FLOAT, 1.0F, 0); XTensor * tGPU1 = NewTensor(tOrder1, tDimSize1, X_FLOAT, 1.0F, 0);
XTensor * bGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0); XTensor * tGPU2 = NewTensor(tOrder2, tDimSize2, X_FLOAT, 1.0F, 0);
XTensor * reduce_bGPU = NewTensor(order_reduce, dimSize_reduce_b, X_FLOAT, 1.0F, 0);
/* Initialize variables */ /* initialize variables */
aGPU->SetData(aData, unitNum); sGPU->SetData(sData, sUnitNum);
bGPU->SetData(bData, unitNum); tGPU1->SetZeroAll();
tGPU2->SetZeroAll();
/* call reduce sum function */ /* call ReduceSum function */
ReduceSum(aGPU, reduce_aGPU, 0); ReduceSum(sGPU, tGPU1, 0);
ReduceSum(bGPU, reduce_bGPU, 1); ReduceSum(sGPU, tGPU2, 1);
/* check results */ /* check results */
gpuTest = reduce_aGPU->CheckData(answer_a, unitNum_a) && reduce_bGPU->CheckData(answer_b, unitNum_b); gpuTest = tGPU1->CheckData(answer1, tUnitNum1) && tGPU2->CheckData(answer2, tUnitNum2);
/* destroy variables */ /* destroy variables */
delete aGPU, bGPU, reduce_aGPU, reduce_bGPU; delete s;
delete[] dimSize, dimSize_reduce_a, dimSize_reduce_b; delete t1;
delete t2;
delete sGPU;
delete tGPU1;
delete tGPU2;
delete[] sDimSize;
delete[] tDimSize1;
delete[] tDimSize2;
return cpuTest && gpuTest; return cpuTest && gpuTest;
#else #else
/* destroy variables */ /* destroy variables */
delete a; delete s;
delete b; delete t1;
delete t2;
delete[] sDimSize;
delete[] tDimSize1;
delete[] tDimSize2;
return cpuTest; return cpuTest;
#endif // USE_CUDA #endif // USE_CUDA
} }
...@@ -127,7 +132,7 @@ bool TestReduceSumForLargescale() ...@@ -127,7 +132,7 @@ bool TestReduceSumForLargescale()
{ {
/* a tensor of size 10000 * 500 */ /* a tensor of size 10000 * 500 */
int order = 2; int order = 2;
int order_reduce = 1; int orderReduce = 1;
int * dimSize = new int[order]; int * dimSize = new int[order];
dimSize[0] = 10000; dimSize[0] = 10000;
dimSize[1] = 500; dimSize[1] = 500;
...@@ -136,18 +141,18 @@ bool TestReduceSumForLargescale() ...@@ -136,18 +141,18 @@ bool TestReduceSumForLargescale()
for (int i = 0; i < order; i++) for (int i = 0; i < order; i++)
unitNum *= dimSize[i]; unitNum *= dimSize[i];
/* a tensor of size 500 */ /* a tensor of size 500 */
int * dimSize_reduce_a = new int[order_reduce]; int * dimSize_reduce_a = new int[orderReduce];
dimSize_reduce_a[0] = 500; dimSize_reduce_a[0] = 500;
int unitNum_a = 1; int unitNum_a = 1;
for (int i = 0; i < order_reduce; i++) for (int i = 0; i < orderReduce; i++)
unitNum_a *= dimSize_reduce_a[i]; unitNum_a *= dimSize_reduce_a[i];
/* a tensor of size 10000 */ /* a tensor of size 10000 */
int * dimSize_reduce_b = new int[order_reduce]; int * dimSize_reduce_b = new int[orderReduce];
dimSize_reduce_b[0] = 10000; dimSize_reduce_b[0] = 10000;
int unitNum_b = 1; int unitNum_b = 1;
for (int i = 0; i < order_reduce; i++) for (int i = 0; i < orderReduce; i++)
unitNum_b *= dimSize_reduce_b[i]; unitNum_b *= dimSize_reduce_b[i];
DTYPE * data = new DTYPE[5000000]; DTYPE * data = new DTYPE[5000000];
...@@ -166,9 +171,9 @@ bool TestReduceSumForLargescale() ...@@ -166,9 +171,9 @@ bool TestReduceSumForLargescale()
/* create tensors */ /* create tensors */
XTensor * a = NewTensor(order, dimSize); XTensor * a = NewTensor(order, dimSize);
XTensor * reduce_a = NewTensor(order_reduce, dimSize_reduce_a); XTensor * reduce_a = NewTensor(orderReduce, dimSize_reduce_a);
XTensor * b = NewTensor(order, dimSize); XTensor * b = NewTensor(order, dimSize);
XTensor * reduce_b = NewTensor(order_reduce, dimSize_reduce_b); XTensor * reduce_b = NewTensor(orderReduce, dimSize_reduce_b);
/* initialize variables */ /* initialize variables */
a->SetData(data, unitNum); a->SetData(data, unitNum);
...@@ -186,9 +191,9 @@ bool TestReduceSumForLargescale() ...@@ -186,9 +191,9 @@ bool TestReduceSumForLargescale()
/* create tensor */ /* create tensor */
XTensor * aGPU = NewTensor(order, dimSize, X_FLOAT); XTensor * aGPU = NewTensor(order, dimSize, X_FLOAT);
XTensor * reduce_aGPU = NewTensor(order_reduce, dimSize_reduce_a, X_FLOAT); XTensor * reduce_aGPU = NewTensor(orderReduce, dimSize_reduce_a, X_FLOAT);
XTensor * bGPU = NewTensor(order, dimSize, X_FLOAT); XTensor * bGPU = NewTensor(order, dimSize, X_FLOAT);
XTensor * reduce_bGPU = NewTensor(order_reduce, dimSize_reduce_b, X_FLOAT); XTensor * reduce_bGPU = NewTensor(orderReduce, dimSize_reduce_b, X_FLOAT);
/* initialize variables */ /* initialize variables */
aGPU->SetData(data, unitNum); aGPU->SetData(data, unitNum);
...@@ -222,7 +227,7 @@ TODO!! ...@@ -222,7 +227,7 @@ TODO!!
extern "C" extern "C"
bool TestReduceSum() bool TestReduceSum()
{ {
XPRINT(0, stdout, "[TEST ReduceSum]\n"); XPRINT(0, stdout, "[TEST ReduceSum] sum the items along a dimension of the tensor.\n");
bool returnFlag = true, caseFlag = true; bool returnFlag = true, caseFlag = true;
/* case 1 test */ /* case 1 test */
...@@ -259,4 +264,4 @@ bool TestReduceSum() ...@@ -259,4 +264,4 @@ bool TestReduceSum()
return returnFlag; return returnFlag;
} }
} // namespace nt(NiuTrans.Tensor) } // namespace nts(NiuTrans.Tensor)
...@@ -24,13 +24,13 @@ ...@@ -24,13 +24,13 @@
#include "../core/ReduceSum.h" #include "../core/ReduceSum.h"
namespace nts { // namespace nt(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
/* test for ReduceSum Function */ /* test for ReduceSum Function */
extern "C" extern "C"
bool TestReduceSum(); bool TestReduceSum();
} // namespace nt(NiuTrans.Tensor) } // namespace nts(NiuTrans.Tensor)
#endif // __TEST_REDUCESUM_H__ #endif // __TEST_REDUCESUM_H__
......
...@@ -19,33 +19,35 @@ ...@@ -19,33 +19,35 @@
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27 * $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27
*/ */
#include "../XTensor.h"
#include "TReduceSumSquared.h" #include "TReduceSumSquared.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1 */ /* case 1: squared sum of the items along a dimension of the tensor.
* For a 1-dimensional data array a, sum = \sum_i (a_i - shift)^2.
* In this case, (2, 4) -> (4), dim = 0.
*/
bool TestReduceSumSquared1() bool TestReduceSumSquared1()
{ {
/* a input tensor of size 2 * 4 */ /* an input tensor of size (2, 4) */
int inputOrder = 2; int sOrder = 2;
int * inputDimSize = new int[inputOrder]; int * sDimSize = new int[sOrder];
inputDimSize[0] = 2; sDimSize[0] = 2;
inputDimSize[1] = 4; sDimSize[1] = 4;
int inputUnitNum = 1; int sUnitNum = 1;
for (int i = 0; i < inputOrder; i++) for (int i = 0; i < sOrder; i++)
inputUnitNum *= inputDimSize[i]; sUnitNum *= sDimSize[i];
/* a output tensor of size 4 */ /* an output tensor of size (4) */
int outputOrder = 1; int tOrder = 1;
int * outputDimSize = new int[outputOrder]; int * tDimSize = new int[tOrder];
outputDimSize[0] = 4; tDimSize[0] = 4;
int outputUnitNum = 1; int tUnitNum = 1;
for (int i = 0; i < outputOrder; i++) for (int i = 0; i < tOrder; i++)
outputUnitNum *= outputDimSize[i]; tUnitNum *= tDimSize[i];
/* a shift tensor of size 4 */ /* a shift tensor of size (4) */
int shiftOrder = 1; int shiftOrder = 1;
int * shiftDimSize = new int[shiftOrder]; int * shiftDimSize = new int[shiftOrder];
shiftDimSize[0] = 4; shiftDimSize[0] = 4;
...@@ -54,8 +56,8 @@ bool TestReduceSumSquared1() ...@@ -54,8 +56,8 @@ bool TestReduceSumSquared1()
for (int i = 0; i < shiftOrder; i++) for (int i = 0; i < shiftOrder; i++)
shiftUnitNum *= shiftDimSize[i]; shiftUnitNum *= shiftDimSize[i];
DTYPE inputData[2][4] = { {0.0, 1.0, 2.0, 3.0}, DTYPE sData[2][4] = { {0.0, 1.0, 2.0, 3.0},
{4.0, 5.0, 6.0, 7.0} }; {4.0, 5.0, 6.0, 7.0} };
DTYPE shiftData[4] = {1.0, -1.0, -1.0, 0.0}; DTYPE shiftData[4] = {1.0, -1.0, -1.0, 0.0};
DTYPE answer[4] = {10.0, 40.0, 58.0, 58.0}; DTYPE answer[4] = {10.0, 40.0, 58.0, 58.0};
...@@ -63,51 +65,164 @@ bool TestReduceSumSquared1() ...@@ -63,51 +65,164 @@ bool TestReduceSumSquared1()
bool cpuTest = true; bool cpuTest = true;
/* create tensors */ /* create tensors */
XTensor * input = NewTensor(inputOrder, inputDimSize); XTensor * s = NewTensor(sOrder, sDimSize);
XTensor * output = NewTensor(outputOrder, outputDimSize); XTensor * t = NewTensor(tOrder, tDimSize);
XTensor * shift = NewTensor(shiftOrder, shiftDimSize);
/* initialize variables */
s->SetData(sData, sUnitNum);
shift->SetData(shiftData, shiftUnitNum);
t->SetZeroAll();
/* call ReduceSumSquared function */
ReduceSumSquared(s, t, 0, shift);
/* check results */
cpuTest = t->CheckData(answer, tUnitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
XTensor * shiftGPU = NewTensor(shiftOrder, shiftDimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
sGPU->SetData(sData, sUnitNum);
shiftGPU->SetData(shiftData, shiftUnitNum);
tGPU->SetZeroAll();
/* call ReduceSumSquared function */
ReduceSumSquared(sGPU, tGPU, 0, shiftGPU);
/* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum);
/* destroy variables */
delete s;
delete t;
delete shift;
delete sGPU;
delete tGPU;
delete shiftGPU;
delete[] sDimSize;
delete[] tDimSize;
delete[] shiftDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete s;
delete t;
delete shift;
delete[] sDimSize;
delete[] tDimSize;
delete[] shiftDimSize;
return cpuTest;
#endif // USE_CUDA
}
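For reference, a hand check of the expected values in the case above, using the formula from the case comment (t_j = \sum_i (sData_{i,j} - shiftData_j)^2, dim = 0):

    column 0: (0 - 1)^2 + (4 - 1)^2 = 1 + 9  = 10
    column 1: (1 + 1)^2 + (5 + 1)^2 = 4 + 36 = 40
    column 2: (2 + 1)^2 + (6 + 1)^2 = 9 + 49 = 58
    column 3: (3 - 0)^2 + (7 - 0)^2 = 9 + 49 = 58

which matches answer = {10.0, 40.0, 58.0, 58.0}.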
/* case 2: squared sum of the items along a dimension of the tensor.
* For a 1-dimensional data array a, sum = \sum_i (a_i - shift)^2.
* In this case, (2, 4) -> (2), dim = 1.
*/
bool TestReduceSumSquared2()
{
/* an input tensor of size (2, 4) */
int sOrder = 2;
int * sDimSize = new int[sOrder];
sDimSize[0] = 2;
sDimSize[1] = 4;
int sUnitNum = 1;
for (int i = 0; i < sOrder; i++)
sUnitNum *= sDimSize[i];
/* an output tensor of size (2) */
int tOrder = 1;
int * tDimSize = new int[tOrder];
tDimSize[0] = 2;
int tUnitNum = 1;
for (int i = 0; i < tOrder; i++)
tUnitNum *= tDimSize[i];
/* a shift tensor of size (2) */
int shiftOrder = 1;
int * shiftDimSize = new int[shiftOrder];
shiftDimSize[0] = 2;
int shiftUnitNum = 1;
for (int i = 0; i < shiftOrder; i++)
shiftUnitNum *= shiftDimSize[i];
DTYPE sData[2][4] = { {0.0, 1.0, 2.0, 3.0},
{4.0, 5.0, 6.0, 7.0} };
DTYPE shiftData[2] = {-1.0, 1.0};
DTYPE answer[2] = {30.0, 86.0};
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * s = NewTensor(sOrder, sDimSize);
XTensor * t = NewTensor(tOrder, tDimSize);
XTensor * shift = NewTensor(shiftOrder, shiftDimSize); XTensor * shift = NewTensor(shiftOrder, shiftDimSize);
/* initialize variables */ /* initialize variables */
input->SetData(inputData, inputUnitNum); s->SetData(sData, sUnitNum);
shift->SetData(shiftData, shiftUnitNum); shift->SetData(shiftData, shiftUnitNum);
output->SetZeroAll(); t->SetZeroAll();
/* call ReduceSumSquared function */ /* call ReduceSumSquared function */
ReduceSumSquared(input, output, 0, shift); ReduceSumSquared(s, t, 1, shift);
/* check results */ /* check results */
cpuTest = output->CheckData(answer, outputUnitNum); cpuTest = t->CheckData(answer, tUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
bool gpuTest = true; bool gpuTest = true;
/* create tensors */ /* create tensors */
XTensor * inputGPU = NewTensor(inputOrder, inputDimSize, X_FLOAT, 1.0F, 0); XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * outputGPU = NewTensor(outputOrder, outputDimSize, X_FLOAT, 1.0F, 0); XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
XTensor * shiftGPU = NewTensor(shiftOrder, shiftDimSize, X_FLOAT, 1.0F, 0); XTensor * shiftGPU = NewTensor(shiftOrder, shiftDimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */ /* initialize variables */
inputGPU->SetData(inputData, inputUnitNum); sGPU->SetData(sData, sUnitNum);
shiftGPU->SetData(shiftData, shiftUnitNum); shiftGPU->SetData(shiftData, shiftUnitNum);
outputGPU->SetZeroAll(); tGPU->SetZeroAll();
/* call ReduceSumSquared function */ /* call ReduceSumSquared function */
ReduceSumSquared(inputGPU, outputGPU, 0, shiftGPU); ReduceSumSquared(sGPU, tGPU, 1, shiftGPU);
/* check results */ /* check results */
gpuTest = output->CheckData(answer, outputUnitNum); gpuTest = tGPU->CheckData(answer, tUnitNum);
/* destroy variables */ /* destroy variables */
delete input, output, shift; delete s;
delete inputGPU, outputGPU, shiftGPU; delete t;
delete[] inputDimSize, outputDimSize, shiftDimSize; delete shift;
delete sGPU;
delete tGPU;
delete shiftGPU;
delete[] sDimSize;
delete[] tDimSize;
delete[] shiftDimSize;
return cpuTest && gpuTest; return cpuTest && gpuTest;
#else #else
/* destroy variables */ /* destroy variables */
delete input, output, shift; delete s;
delete[] inputDimSize, outputDimSize, shiftDimSize; delete t;
delete shift;
delete[] sDimSize;
delete[] tDimSize;
delete[] shiftDimSize;
return cpuTest; return cpuTest;
#endif // USE_CUDA #endif // USE_CUDA
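For reference, a hand check of the case-2 expected values, reducing along dim = 1 with one shift per row:

    row 0, shift = -1: (0 + 1)^2 + (1 + 1)^2 + (2 + 1)^2 + (3 + 1)^2 = 1 + 4 + 9 + 16  = 30
    row 1, shift =  1: (4 - 1)^2 + (5 - 1)^2 + (6 - 1)^2 + (7 - 1)^2 = 9 + 16 + 25 + 36 = 86

which matches answer = {30.0, 86.0}.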
...@@ -122,7 +237,7 @@ TODO!! ...@@ -122,7 +237,7 @@ TODO!!
extern "C" extern "C"
bool TestReduceSumSquared() bool TestReduceSumSquared()
{ {
XPRINT(0, stdout, "[TEST ReduceSumSquared]\n"); XPRINT(0, stdout, "[TEST ReduceSumSquared] squared sum of the items along a dimension of the tensor\n");
bool returnFlag = true, caseFlag = true; bool returnFlag = true, caseFlag = true;
/* case 1 test */ /* case 1 test */
...@@ -133,6 +248,15 @@ bool TestReduceSumSquared() ...@@ -133,6 +248,15 @@ bool TestReduceSumSquared()
} }
else else
XPRINT(0, stdout, ">> case 1 passed!\n"); XPRINT(0, stdout, ">> case 1 passed!\n");
/* case 2 test */
caseFlag = TestReduceSumSquared2();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 1 failed!\n");
}
else
XPRINT(0, stdout, ">> case 1 passed!\n");
/* other cases test */ /* other cases test */
/* /*
......
...@@ -19,33 +19,35 @@ ...@@ -19,33 +19,35 @@
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27 * $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27
*/ */
#include "../XTensor.h"
#include "TReduceVariance.h" #include "TReduceVariance.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1 */ /* case 1: variance of the items along a dimension of the tensor.
* For a 1-dimensional data array a, variance = 1/n * \sum_i (a_i - mean)^2.
* In this case, (2, 4) -> (4), dim = 0.
*/
bool TestReduceVariance1() bool TestReduceVariance1()
{ {
/* a input tensor of size 2 * 4 */ /* an input tensor of size (2, 4) */
int inputOrder = 2; int sOrder = 2;
int * inputDimSize = new int[inputOrder]; int * sDimSize = new int[sOrder];
inputDimSize[0] = 2; sDimSize[0] = 2;
inputDimSize[1] = 4; sDimSize[1] = 4;
int inputUnitNum = 1; int sUnitNum = 1;
for (int i = 0; i < inputOrder; i++) for (int i = 0; i < sOrder; i++)
inputUnitNum *= inputDimSize[i]; sUnitNum *= sDimSize[i];
/* a output tensor of size 1 */ /* an output tensor of size (4) */
int outputOrder = 1; int tOrder = 1;
int * outputDimSize = new int[outputOrder]; int * tDimSize = new int[tOrder];
outputDimSize[0] = 4; tDimSize[0] = 4;
int outputUnitNum = 1; int tUnitNum = 1;
for (int i = 0; i < outputOrder; i++) for (int i = 0; i < tOrder; i++)
outputUnitNum *= outputDimSize[i]; tUnitNum *= tDimSize[i];
/* a shift tensor of size 1 */ /* a mean tensor of size (4) */
int meanOrder = 1; int meanOrder = 1;
int * meanDimSize = new int[meanOrder]; int * meanDimSize = new int[meanOrder];
meanDimSize[0] = 4; meanDimSize[0] = 4;
...@@ -54,61 +56,70 @@ bool TestReduceVariance1() ...@@ -54,61 +56,70 @@ bool TestReduceVariance1()
for (int i = 0; i < meanOrder; i++) for (int i = 0; i < meanOrder; i++)
meanUnitNum *= meanDimSize[i]; meanUnitNum *= meanDimSize[i];
DTYPE inputData[2][4] = { {0.0, 1.0, 2.0, 3.0}, DTYPE sData[2][4] = { {0.0F, 1.0F, 2.0F, 3.0F},
{4.0, 5.0, 6.0, 7.0} }; {4.0F, 5.0F, 6.0F, 7.0F} };
DTYPE meanData[4] = {2.0, 3.0, 4.0, 5.0}; DTYPE meanData[4] = {2.0F, 3.0F, 4.0F, 5.0F};
DTYPE answer[4] = {4.0, 4.0, 4.0, 4.0}; DTYPE answer[4] = {4.0F, 4.0F, 4.0F, 4.0F};
/* CPU test */ /* CPU test */
bool cpuTest = true; bool cpuTest = true;
/* create tensors */ /* create tensors */
XTensor * input = NewTensor(inputOrder, inputDimSize); XTensor * s = NewTensor(sOrder, sDimSize);
XTensor * output = NewTensor(outputOrder, outputDimSize); XTensor * t = NewTensor(tOrder, tDimSize);
XTensor * mean = NewTensor(meanOrder, meanDimSize); XTensor * mean = NewTensor(meanOrder, meanDimSize);
/* initialize variables */ /* initialize variables */
input->SetData(inputData, inputUnitNum); s->SetData(sData, sUnitNum);
mean->SetData(meanData, meanUnitNum); mean->SetData(meanData, meanUnitNum);
output->SetZeroAll(); t->SetZeroAll();
/* call ReduceVariance function */ /* call ReduceVariance function */
ReduceVariance(input, output, 0, mean); ReduceVariance(s, t, 0, mean);
/* check results */ /* check results */
cpuTest = output->CheckData(answer, outputUnitNum); cpuTest = t->CheckData(answer, tUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
bool gpuTest = true; bool gpuTest = true;
/* create tensors */ /* create tensors */
XTensor * inputGPU = NewTensor(inputOrder, inputDimSize, X_FLOAT, 1.0F, 0); XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * outputGPU = NewTensor(outputOrder, outputDimSize, X_FLOAT, 1.0F, 0); XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
XTensor * meanGPU = NewTensor(meanOrder, meanDimSize, X_FLOAT, 1.0F, 0); XTensor * meanGPU = NewTensor(meanOrder, meanDimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */ /* initialize variables */
inputGPU->SetData(inputData, inputUnitNum); sGPU->SetData(sData, sUnitNum);
meanGPU->SetData(meanData, meanUnitNum); meanGPU->SetData(meanData, meanUnitNum);
outputGPU->SetZeroAll(); tGPU->SetZeroAll();
/* call ReduceVariance function */ /* call ReduceVariance function */
ReduceVariance(inputGPU, outputGPU, 0, meanGPU); ReduceVariance(sGPU, tGPU, 0, meanGPU);
/* check results */ /* check results */
gpuTest = output->CheckData(answer, outputUnitNum); gpuTest = tGPU->CheckData(answer, tUnitNum);
/* destroy variables */ /* destroy variables */
delete input, output, mean; delete s;
delete inputGPU, outputGPU, meanGPU; delete t;
delete[] inputDimSize, outputDimSize, meanDimSize; delete mean;
delete sGPU;
delete tGPU;
delete meanGPU;
delete[] sDimSize;
delete[] tDimSize;
delete[] meanDimSize;
return cpuTest && gpuTest; return cpuTest && gpuTest;
#else #else
/* destroy variables */ /* destroy variables */
delete input, output, mean; delete s;
delete[] inputDimSize, outputDimSize, meanDimSize; delete t;
delete mean;
delete[] sDimSize;
delete[] tDimSize;
delete[] meanDimSize;
return cpuTest; return cpuTest;
#endif // USE_CUDA #endif // USE_CUDA
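For reference, a hand check of the expected variances, using the formula from the case comment (variance = 1/n * \sum_i (a_i - mean)^2, with n = 2 rows per column):

    column 0: ((0 - 2)^2 + (4 - 2)^2) / 2 = (4 + 4) / 2 = 4
    column 1: ((1 - 3)^2 + (5 - 3)^2) / 2 = (4 + 4) / 2 = 4

and likewise for columns 2 and 3, matching answer = {4.0, 4.0, 4.0, 4.0}.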
...@@ -123,7 +134,7 @@ TODO!! ...@@ -123,7 +134,7 @@ TODO!!
extern "C" extern "C"
bool TestReduceVariance() bool TestReduceVariance()
{ {
XPRINT(0, stdout, "[TEST ReduceVariance]\n"); XPRINT(0, stdout, "[TEST ReduceVariance] variance of the items along a dimension of the tensor\n");
bool returnFlag = true, caseFlag = true; bool returnFlag = true, caseFlag = true;
/* case 1 test */ /* case 1 test */
......
...@@ -16,7 +16,7 @@ ...@@ -16,7 +16,7 @@
*/ */
/* /*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27 * $Created by: Xu Chen (email: hello_master1954@163.com) 2018-07-06
*/ */
#ifndef __TEST_REDUCEVARIANCE_H__ #ifndef __TEST_REDUCEVARIANCE_H__
......
...@@ -19,27 +19,28 @@ ...@@ -19,27 +19,28 @@
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27 * $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27
*/ */
#include "../XTensor.h"
#include "TScaleAndShift.h" #include "TScaleAndShift.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1 */ /* case 1: scale and shift all tensor entries.
* p = p * scale + shift
*/
bool TestScaleAndShift1() bool TestScaleAndShift1()
{ {
/* a input tensor of size 2 * 4 */ /* an input tensor of size (2, 4) */
int inputOrder = 2; int sOrder = 2;
int * inputDimSize = new int[inputOrder]; int * sDimSize = new int[sOrder];
inputDimSize[0] = 2; sDimSize[0] = 2;
inputDimSize[1] = 4; sDimSize[1] = 4;
int inputUnitNum = 1; int sUnitNum = 1;
for (int i = 0; i < inputOrder; i++) for (int i = 0; i < sOrder; i++)
inputUnitNum *= inputDimSize[i]; sUnitNum *= sDimSize[i];
DTYPE inputData[2][4] = { {0.0, 1.0, 2.0, 3.0}, DTYPE sData[2][4] = { {0.0F, 1.0F, 2.0F, 3.0F},
{4.0, 5.0, 6.0, 7.0} }; {4.0F, 5.0F, 6.0F, 7.0F} };
DTYPE answer[2][4] = { {0.5, 2.5, 4.5, 6.5}, DTYPE answer[2][4] = { {0.5F, 2.5F, 4.5F, 6.5F},
{8.5, 10.5, 12.5, 14.5} }; {8.5F, 10.5F, 12.5F, 14.5F} };
DTYPE scaleFactor = 2.0; DTYPE scaleFactor = 2.0;
DTYPE shiftFactor = 0.5; DTYPE shiftFactor = 0.5;
...@@ -48,43 +49,43 @@ bool TestScaleAndShift1() ...@@ -48,43 +49,43 @@ bool TestScaleAndShift1()
bool cpuTest = true; bool cpuTest = true;
/* create tensors */ /* create tensors */
XTensor * input = NewTensor(inputOrder, inputDimSize); XTensor * s = NewTensor(sOrder, sDimSize);
/* initialize variables */ /* initialize variables */
input->SetData(inputData, inputUnitNum); s->SetData(sData, sUnitNum);
/* call ScaleAndShift function */ /* call ScaleAndShift function */
ScaleAndShift(input, scaleFactor, shiftFactor); ScaleAndShift(s, scaleFactor, shiftFactor);
/* check results */ /* check results */
cpuTest = input->CheckData(answer, inputUnitNum); cpuTest = s->CheckData(answer, sUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
bool gpuTest = true; bool gpuTest = true;
/* create tensors */ /* create tensors */
XTensor * inputGPU = NewTensor(inputOrder, inputDimSize, X_FLOAT, 1.0F, 0); XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */ /* initialize variables */
inputGPU->SetData(inputData, inputUnitNum); sGPU->SetData(sData, sUnitNum);
/* call ScaleAndShift function */ /* call ScaleAndShift function */
ScaleAndShift(inputGPU, scaleFactor, shiftFactor); ScaleAndShift(sGPU, scaleFactor, shiftFactor);
/* check results */ /* check results */
gpuTest = inputGPU->CheckData(answer, inputUnitNum); gpuTest = sGPU->CheckData(answer, sUnitNum);
/* destroy variables */ /* destroy variables */
delete input; delete s;
delete inputGPU; delete sGPU;
delete[] inputDimSize; delete[] sDimSize;
return cpuTest && gpuTest; return cpuTest && gpuTest;
#else #else
/* destroy variables */ /* destroy variables */
delete input; delete s;
delete[] inputDimSize; delete[] sDimSize;
return cpuTest; return cpuTest;
#endif // USE_CUDA #endif // USE_CUDA
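For reference, a hand check of the expected values, applying p = p * 2.0 + 0.5 elementwise: the first row maps 0 -> 0.5, 1 -> 2.5, 2 -> 4.5, 3 -> 6.5, and the second row maps 4 -> 8.5, 5 -> 10.5, 6 -> 12.5, 7 -> 14.5, matching the answer array.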
...@@ -99,7 +100,7 @@ TODO!! ...@@ -99,7 +100,7 @@ TODO!!
extern "C" extern "C"
bool TestScaleAndShift() bool TestScaleAndShift()
{ {
XPRINT(0, stdout, "[TEST ScaleAndShift]\n"); XPRINT(0, stdout, "[TEST ScaleAndShift] scale and shift all tensor entires\n");
bool returnFlag = true, caseFlag = true; bool returnFlag = true, caseFlag = true;
/* case 1 test */ /* case 1 test */
......
...@@ -16,7 +16,7 @@ ...@@ -16,7 +16,7 @@
*/ */
/* /*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27 * $Created by: Xu Chen (email: hello_master1954@163.com) 2018-07-04
*/ */
#include "TSelect.h" #include "TSelect.h"
...@@ -25,10 +25,11 @@ namespace nts { // namespace nts(NiuTrans.Tensor) ...@@ -25,10 +25,11 @@ namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1: test SelectRange function. /* case 1: test SelectRange function.
* It can generate a tensor with selected data * It can generate a tensor with selected data
* in range[low,high] along the given dimension. * in range[low,high] along the given dimension.
* In this case, (2, 2, 4) -> (2, 2, 2), dim = 2, low = 1, high = 3.
*/ */
bool TestSelect1() bool TestSelect1()
{ {
/* a input tensor of size (2, 4) */ /* an input tensor of size (2, 2, 4) */
int sOrder = 3; int sOrder = 3;
int * sDimSize = new int[sOrder]; int * sDimSize = new int[sOrder];
sDimSize[0] = 2; sDimSize[0] = 2;
...@@ -39,23 +40,25 @@ bool TestSelect1() ...@@ -39,23 +40,25 @@ bool TestSelect1()
for (int i = 0; i < sOrder; i++) for (int i = 0; i < sOrder; i++)
sUnitNum *= sDimSize[i]; sUnitNum *= sDimSize[i];
/* a output tensor of size (2, 2) */ /* an output tensor of size (2, 2, 2) */
int tOrder = 3; int tOrder = 3;
int * tDimSize = new int[tOrder]; int * tDimSize = new int[tOrder];
tDimSize[0] = 2; tDimSize[0] = 2;
tDimSize[1] = 1; tDimSize[1] = 2;
tDimSize[2] = 4; tDimSize[2] = 2;
int tUnitNum = 1; int tUnitNum = 1;
for (int i = 0; i < tOrder; i++) for (int i = 0; i < tOrder; i++)
tUnitNum *= tDimSize[i]; tUnitNum *= tDimSize[i];
DTYPE sData[2][2][4] = { { {0.0, 1.0, 2.0, 3.0}, DTYPE sData[2][2][4] = { { {0.0F, 1.0F, 2.0F, 3.0F},
{4.0, 5.0, 6.0, 7.0} }, {4.0F, 5.0F, 6.0F, 7.0F} },
{ {1.0, 2.0, 3.0, 4.0}, { {1.0F, 2.0F, 3.0F, 4.0F},
{5.0, 6.0, 7.0, 8.0} } }; {5.0F, 6.0F, 7.0F, 8.0F} } };
DTYPE answer[2][1][4] = { { {4.0, 5.0, 6.0, 7.0} }, DTYPE answer[2][2][2] = { { {1.0F, 2.0F},
{ {5.0, 6.0, 7.0, 8.0} } }; {5.0F, 6.0F} },
{ {2.0F, 3.0F},
{6.0F, 7.0F} } };
/* CPU test */ /* CPU test */
bool cpuTest = true; bool cpuTest = true;
...@@ -69,7 +72,7 @@ bool TestSelect1() ...@@ -69,7 +72,7 @@ bool TestSelect1()
t->SetZeroAll(); t->SetZeroAll();
/* call SelectRange function */ /* call SelectRange function */
SelectRange(s, 1, 1, 2, t); SelectRange(s, 2, 1, 3, t);
/* check results */ /* check results */
cpuTest = t->CheckData(answer, tUnitNum); cpuTest = t->CheckData(answer, tUnitNum);
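Note on the expected values: with dim = 2, low = 1, high = 3, the answer keeps indices 1 and 2 of each innermost row ({0, 1, 2, 3} -> {1, 2}, {4, 5, 6, 7} -> {5, 6}, {1, 2, 3, 4} -> {2, 3}, {5, 6, 7, 8} -> {6, 7}). Judging from this test data, the selected range behaves as half-open, [low, high), even though the comment above writes it as range[low,high].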
...@@ -121,7 +124,7 @@ TODO!! ...@@ -121,7 +124,7 @@ TODO!!
extern "C" extern "C"
bool TestSelect() bool TestSelect()
{ {
XPRINT(0, stdout, "[TEST Select] scale and shift all tensor entires\n"); XPRINT(0, stdout, "[TEST Select] generate a tensor with seleccted data in range[low,high] along the given dimension \n");
bool returnFlag = true, caseFlag = true; bool returnFlag = true, caseFlag = true;
/* case 1 test */ /* case 1 test */
......