Commit a3a7145f by liyinqiao


1. Support check data with tolerance, get data from n dim with int format; 2. Bug fixed; 3. Format correction; 4. New tests
parent 2631f50f
...@@ -555,6 +555,27 @@ bool XTensor::CheckData(const void * d, int num, int beg)
return true;
}
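/*
check whether the data array is the same as the "answer" within a tolerance
>> d - the answer array
>> num - number of data items to check
>> tolerance - the largest per-item difference that still counts as equal
>> beg - where the check starts in the data array
*/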
bool XTensor::CheckData(const void * d, int num, float tolerance, int beg)
{
if (data == NULL || d == NULL)
return false;
CheckNTErrors(!isSparse, "TODO");
CheckNTErrors(num == unitNum - beg, "Illegal size!");
DTYPE * valuePtr = (DTYPE*)data + beg;
DTYPE value = 0;
DTYPE * answerPtr = (DTYPE*)d;
for (int i = 0; i < num; i++) {
value = ToCPU(devID, valuePtr);
if (fabs(value - *answerPtr) > tolerance)
return false;
valuePtr++;
answerPtr++;
}
return true;
}
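A minimal usage sketch of the new overload (the tensor construction mirrors the tests below; the 0.0001F tolerance is the same value this commit adds to TPower.cpp). Bit-exact comparison is brittle for results computed on a GPU, so a test can now pass a tolerance:

int dims[1] = {4};
XTensor * t = NewTensor(1, dims);
DTYPE values[4] = {1.0F, 2.0F, 3.0F, 4.0F};
t->SetData(values, 4);
bool exact = t->CheckData(values, 4);          /* bit-exact check */
bool close = t->CheckData(values, 4, 0.0001F); /* check within the tolerance */
delete t;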
/*
set the cell to the ascending order along a given dimension
>> dim - the dimension specified
...@@ -697,6 +718,63 @@ DTYPE XTensor::Get3D(int d0, int d1, int d2)
}
/*
get the value of a cell in a 1d tensor in int type
>> i - index
<< return - value of cell(i) in int
*/
int XTensor::Get1DInt(int i)
{
CheckNTErrors((order == 1), "Cannot get a 1d cell for a tensor whose order is not 1!");
CheckNTErrors((i >= 0 && i < dimSize[0]), "dimension 0 is out of range!");
CheckNTErrors((dataType == X_INT), "The tensor is not in int type.");
int dims[1] = {i};
void * value = GetCell(dims, 1);
return ToCPUInt(devID, value);
}
/*
get the value of a cell in a 2d tensor in int type
>> ni - row index
>> mi - column index
<< return - value of cell(ni, mi) in int
*/
int XTensor::Get2DInt(int ni, int mi)
{
CheckNTErrors((order == 2), "Cannot get a 2d cell for a tensor whose order is not 2!");
CheckNTErrors((ni >= 0 && ni < dimSize[0]), "dimension 0 is out of range!");
CheckNTErrors((mi >= 0 && mi < dimSize[1]), "dimension 1 is out of range!");
CheckNTErrors((dataType == X_INT), "The tensor is not in int type.");
int dims[2] = {ni, mi};
void * value = GetCell(dims, 2);
return ToCPUInt(devID, value);
}
/*
get the value of a cell in a 3d tensor in int type
>> d0 - index of dimension 0
>> d1 - index of dimension 1
>> d2 - index of dimension 2
<< return - value of cell(d0, d1, d2) in int
*/
int XTensor::Get3DInt(int d0, int d1, int d2)
{
CheckNTErrors((order == 3), "Cannot get a 3d cell for a tensor whose order is not 3!");
CheckNTErrors((d0 >= 0 && d0 < dimSize[0]), "dimension 0 is out of range!");
CheckNTErrors((d1 >= 0 && d1 < dimSize[1]), "dimension 1 is out of range!");
CheckNTErrors((d2 >= 0 && d2 < dimSize[2]), "dimension 2 is out of range!");
CheckNTErrors((dataType == X_INT), "The tensor is not in int type.");
int dims[3] = {d0, d1, d2};
void * value = GetCell(dims, 3);
return ToCPUInt(devID, value);
}
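A minimal sketch of the new int getters (an X_INT tensor created as in TTopK.cpp below; the cell index is hypothetical):

int dims[2] = {2, 4};
XTensor * index = NewTensor(2, dims, X_INT);
index->SetZeroAll();
int v = index->Get2DInt(1, 3); /* reads cell (1, 3) as an int, copying from the device if needed */
delete index;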
/*
get the value of a cell in the sparse tensor
>> i - i-th tuple in the tuple list of the sparse tensor
<< return - value of the tuple
...
...@@ -197,6 +197,9 @@ public:
/* check whether the data array is the same as the answer */
bool CheckData(const void * answer, int num, int beg = 0);
/* check whether the data array is the same as the answer within a tolerance */
bool CheckData(const void * answer, int num, float tolerance, int beg = 0);
/* set the cell to the ascending order along a given dimension */
void SetAscendingOrder(int dim);
...@@ -206,15 +209,24 @@ public:
/* get the pointer to a cell */
void * GetCell(int index[], int size = -1);
- /* get the value of a cell in a 1d tensor */
+ /* get the default type value of a cell in a 1d tensor */
DTYPE Get1D(int i);
- /* get the value of a cell in a 2d tensor */
+ /* get the default type value of a cell in a 2d tensor */
DTYPE Get2D(int ni, int mi);
- /* get the value of a cell in a 3d tensor */
+ /* get the default type value of a cell in a 3d tensor */
DTYPE Get3D(int d0, int d1, int d2);
/* get the int value of a cell in a 1d tensor */
int Get1DInt(int i);
/* get the int value of a cell in a 2d tensor */
int Get2DInt(int ni, int mi);
/* get the int value of a cell in a 3d tensor */
int Get3DInt(int d0, int d1, int d2);
/* get the value of a cell in a sparse tensor */
DTYPE GetInSparse(int i);
...
...@@ -235,13 +235,20 @@ void CudaSortBig(XTensor * a, XTensor * b, XTensor * indexA, XTensor * indexB, i
int m = GetNextPower2(strideNum);
int n = stride * blockNum;
- void * buf = mem->AllocBuf(mem->devID, n * m * a->unitSize);
- void * bufIndex = (indexA != NULL && indexB != NULL) ? mem->AllocBuf(mem->devID, n * m * sizeof(int)) : NULL;
+ /* recheck */
+ /*void * buf = mem->AllocBuf(mem->devID, n * m * a->unitSize);
+ void * bufIndex = (indexA != NULL && indexB != NULL) ? mem->AllocBuf(mem->devID, n * m * sizeof(int)) : NULL;*/
+ /* change by liyinqiao */
+ void * buf = mem != NULL ? mem->AllocBuf(a->devID, n * m * a->unitSize) : XMemAlloc(a->devID, n * m * a->unitSize);
+ void * bufIndex = NULL;
+ if (indexA != NULL && indexB != NULL) {
+ bufIndex = mem != NULL ? mem->AllocBuf(a->devID, n * m * sizeof(int)) : XMemAlloc(a->devID, n * m * sizeof(int));
+ }
int cudaGrids[3];
int cudaBlocks[3];
- GDevs.GetCudaThread(mem->devID, m * n, cudaGrids, cudaBlocks);
+ GDevs.GetCudaThread(a->devID, m * n, cudaGrids, cudaBlocks);
int devIDBackup;
ProtectCudaDev(a->devID, devIDBackup);
...@@ -250,7 +257,7 @@ void CudaSortBig(XTensor * a, XTensor * b, XTensor * indexA, XTensor * indexB, i
KernelSetDataArray<DTYPE> << <dim3(cudaGrids[0]), dim3(cudaBlocks[0]) >> >
((DTYPE*)buf, DTYPE_MIN, m * n);
- GDevs.GetCudaThread2D(mem->devID, strideNum, n, MAX_INT, cudaGrids, cudaBlocks);
+ GDevs.GetCudaThread2D(a->devID, strideNum, n, MAX_INT, cudaGrids, cudaBlocks);
/* reorganize the data into a matrix */
KernelReorganize<DTYPE> << <dim3(cudaGrids[1], cudaGrids[0]), dim3(cudaBlocks[1], cudaBlocks[0]) >> >
...@@ -261,7 +268,7 @@ void CudaSortBig(XTensor * a, XTensor * b, XTensor * indexA, XTensor * indexB, i
KernelReorganize<int> << <dim3(cudaGrids[1], cudaGrids[0]), dim3(cudaBlocks[1], cudaBlocks[0]) >> >
(indexA->data, bufIndex, stride, strideNum, blockNum, m, n);
- GDevs.GetCudaThread2D(mem->devID, m, n, MAX_INT, cudaGrids, cudaBlocks);
+ GDevs.GetCudaThread2D(a->devID, m, n, MAX_INT, cudaGrids, cudaBlocks);
/* bitonic sorting */
for (int i = 2; i <= m; i <<= 1) {
...@@ -277,7 +284,7 @@ void CudaSortBig(XTensor * a, XTensor * b, XTensor * indexA, XTensor * indexB, i
}
}
- GDevs.GetCudaThread2D(mem->devID, k, n, MAX_INT, cudaGrids, cudaBlocks);
+ GDevs.GetCudaThread2D(a->devID, k, n, MAX_INT, cudaGrids, cudaBlocks);
/* copy result to the output tensor */
KernelReorganizeBack<DTYPE> << <dim3(cudaGrids[1], cudaGrids[0]), dim3(cudaBlocks[1], cudaBlocks[0]) >> >
...@@ -287,9 +294,20 @@ void CudaSortBig(XTensor * a, XTensor * b, XTensor * indexA, XTensor * indexB, i
KernelReorganizeBack<int> << <dim3(cudaGrids[1], cudaGrids[0]), dim3(cudaBlocks[1], cudaBlocks[0]) >> >
(bufIndex, indexB->data, m, n, stride, k, blockNum);
- mem->ReleaseBuf(mem->devID, n * m * a->unitSize);
- if (indexA != NULL && indexB != NULL)
- mem->ReleaseBuf(mem->devID, n * m * sizeof(int));
+ /* recheck */
+ /*mem->ReleaseBuf(mem->devID, n * m * a->unitSize);
+ if (indexA != NULL && indexB != NULL)
+ mem->ReleaseBuf(mem->devID, n * m * sizeof(int));*/
+ /* change by liyinqiao */
+ if (mem != NULL)
+ mem->ReleaseBuf(a->devID, n * m * a->unitSize);
+ else
+ XMemFree(a->devID, buf);
+ if (indexA != NULL && indexB != NULL)
+ if (mem != NULL)
+ mem->ReleaseBuf(a->devID, n * m * sizeof(int));
+ else
+ XMemFree(a->devID, bufIndex);
ProtectCudaDev(a->devID, devIDBackup);
}
...
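The change above lets CudaSortBig run on tensors that have no memory pool attached: when mem is NULL, the temporary buffers fall back to direct device allocation, and every device ID now comes from the tensor a instead of the pool. A standalone sketch of the pattern, assuming the pool type XMem and the XMemAlloc/XMemFree calls used above (AllocSortBuf/FreeSortBuf are hypothetical helper names):

void * AllocSortBuf(XMem * mem, int devID, int size)
{
    /* prefer the pool buffer when a pool is attached, otherwise allocate directly */
    return mem != NULL ? mem->AllocBuf(devID, size) : XMemAlloc(devID, size);
}

void FreeSortBuf(XMem * mem, int devID, void * p, int size)
{
    /* the release path must mirror the allocation path */
    if (mem != NULL)
        mem->ReleaseBuf(devID, size);
    else
        XMemFree(devID, p);
}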
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27
*/
#include "../XTensor.h"
#include "../XUtility.h"
#include "TCopyValues.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1 */
bool TestCopyValues1()
{
/* an input tensor of size 2 * 4 */
int inputOrder = 2;
int * inputDimSize = new int[inputOrder];
inputDimSize[0] = 2;
inputDimSize[1] = 4;
int inputUnitNum = 1;
for (int i = 0; i < inputOrder; i++)
inputUnitNum *= inputDimSize[i];
DTYPE inputData[2][4] = { {0.0, 1.0, 2.0, 3.0},
{4.0, 5.0, 6.0, 7.0} };
DTYPE scaleFactor = 2.0;
DTYPE shiftFactor = 0.5;
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * input = NewTensor(inputOrder, inputDimSize);
XTensor * output = NewTensor(inputOrder, inputDimSize);
/* initialize variables */
input->SetData(inputData, inputUnitNum);
output->SetZeroAll();
/* call CopyValues function */
CopyValues(input, output);
/* check results */
cpuTest = output->CheckData(input->data, inputUnitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
XTensor * inputGPU = NewTensor(inputOrder, inputDimSize, X_FLOAT, 1.0F, 0);
XTensor * outputGPU = NewTensor(inputOrder, inputDimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
inputGPU->SetData(inputData, inputUnitNum);
outputGPU->SetZeroAll();
/* call CopyValues function */
CopyValues(inputGPU, outputGPU);
/* check results */
DTYPE * dataGPU = (DTYPE*)inputGPU->data;
int size = inputUnitNum * inputGPU->unitSize;
char * dataCPU = new char[size];
XMemCopy(dataCPU, -1, dataGPU, inputGPU->devID, size);
gpuTest = outputGPU->CheckData(dataCPU, inputUnitNum);
/* destroy variables */
delete[] dataCPU;
delete input;
delete output;
delete inputGPU;
delete outputGPU;
delete[] inputDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete input;
delete output;
delete[] inputDimSize;
return cpuTest;
#endif // USE_CUDA
}
/* other cases */
/*
TODO!!
*/
/* test for CopyValues Function */
extern "C"
bool TestCopyValues()
{
XPRINT(0, stdout, "[TEST CopyValues]\n");
bool returnFlag = true, caseFlag = true;
/* case 1 test */
caseFlag = TestCopyValues1();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 1 failed!\n");
}
else
XPRINT(0, stdout, ">> case 1 passed!\n");
/* other cases test */
/*
TODO!!
*/
if (returnFlag) {
XPRINT(0, stdout, ">> All Passed!\n");
}
else
XPRINT(0, stdout, ">> Failed!\n");
XPRINT(0, stdout, "\n");
return returnFlag;
}
} // namespace nts(NiuTrans.Tensor)
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27
*/
#ifndef __TEST_COPYVALUES_H__
#define __TEST_COPYVALUES_H__
#include "../core/CopyValues.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* test for CopyValues Function */
extern "C"
bool TestCopyValues();
} // namespace nts(NiuTrans.Tensor)
#endif // __TEST_COPYVALUES_H__
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-29
*/
#include "../XTensor.h"
#include "../XUtility.h"
#include "TIdentity.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1: test Identity function.
* Identity function: y = x
*/
bool TestIdentity1()
{
/* an input tensor of size (2, 3) */
int sOrder = 2;
int * sDimSize = new int[sOrder];
sDimSize[0] = 2;
sDimSize[1] = 3;
int sUnitNum = 1;
for (int i = 0; i < sOrder; i++)
sUnitNum *= sDimSize[i];
DTYPE xData[2][3] = { {0.0, 1.0, 2.0},
{0.5, 0.7, 1.4} };
DTYPE answer[2][3] = { {0.0, 1.0, 2.0},
{0.5, 0.7, 1.4} };
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * x = NewTensor(sOrder, sDimSize);
XTensor * y = NewTensor(sOrder, sDimSize);
/* initialize variables */
x->SetData(xData, sUnitNum);
y->SetZeroAll();
/* call Identity function */
Identity(x, y);
/* check result */
cpuTest = y->CheckData(answer, sUnitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
XTensor * xGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * yGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
xGPU->SetData(xData, sUnitNum);
yGPU->SetZeroAll();
/* call Identity function */
Identity(xGPU, yGPU);
/* check result */
gpuTest = yGPU->CheckData(answer, sUnitNum);
/* destroy variables */
delete x;
delete y;
delete xGPU;
delete yGPU;
delete[] sDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete x;
delete y;
delete[] sDimSize;
return cpuTest;
#endif // USE_CUDA
}
/* case 2: test IdentityBackward function.
* IdentityBackward function: dE/dx = dE/dy * dy/dx = dE/dy
*/
bool TestIdentity2()
{
int sOrder = 2;
int * sDimSize = new int[sOrder];
sDimSize[0] = 1;
sDimSize[1] = 3;
int sUnitNum = 1;
for (int i = 0; i < sOrder; i++)
sUnitNum *= sDimSize[i];
DTYPE xData[1][3] = { {0.0, 1.0, 2.0} };
DTYPE gData[1][3] = { {0.0, 0.0, 1.0} };
DTYPE answer[3] = {0.090031, 0.244728, -0.334759};
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * x = NewTensor(sOrder, sDimSize);
XTensor * y = NewTensor(sOrder, sDimSize);
XTensor * g = NewTensor(sOrder, sDimSize);
XTensor * dedy = NewTensor(sOrder, sDimSize);
XTensor * dedx = NewTensor(sOrder, sDimSize);
/* initialize variables */
x->SetData(xData, sUnitNum);
g->SetData(gData, sUnitNum);
y->SetZeroAll();
dedx->SetZeroAll();
dedy->SetZeroAll();
/* call Identity function */
Identity(x, y);
/* check result */
printf("CPU Test:\n");
printf("Identity Result:");
DTYPE * checkData = (DTYPE*)y->data;
for (int i = 0; i < sUnitNum; i++) {
printf("\t%f", checkData[i]);
}
printf("\n");
/* call IdentityBackward function */
IdentityBackward(g, y, x, dedy, dedx, CROSSENTROPY);
/* check result */
printf("Computer de/dx:");
checkData = (DTYPE*)dedx->data;
for (int i = 0; i < sUnitNum; i++) {
printf("\t%f", checkData[i]);
}
printf("\n");
printf("Real de/dx:");
for (int i = 0; i < sUnitNum; i++) {
printf("\t%f", answer[i]);
}
printf("\n");
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
XTensor * xGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * yGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * gGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * dedyGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * dedxGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
xGPU->SetData(xData, sUnitNum);
gGPU->SetData(gData, sUnitNum);
yGPU->SetZeroAll();
dedxGPU->SetZeroAll();
dedyGPU->SetZeroAll();
/* call Identity function */
Identity(xGPU, yGPU);
/* check result */
printf("\nGPU Test:\n");
printf("Identity Result:");
checkData = (DTYPE*)y->data;
for (int i = 0; i < sUnitNum; i++) {
printf("\t%f", checkData[i]);
}
printf("\n");
/* call IdentityBackward function */
IdentityBackward(gGPU, yGPU, xGPU, dedyGPU, dedxGPU, CROSSENTROPY);
/* check result */
printf("Computer de/dx:");
checkData = (DTYPE*)dedxGPU->data;
int size = sUnitNum * dedxGPU->unitSize;
DTYPE * copy = new DTYPE[sUnitNum];
XMemCopy(copy, -1, checkData, dedxGPU->devID, size);
for (int i = 0; i < sUnitNum; i++) {
printf("\t%f", copy[i]);
}
printf("\n");
printf("Real de/dx:");
for (int i = 0; i < sUnitNum; i++) {
printf("\t%f", answer[i]);
}
printf("\n");
/* destroy variables */
delete[] copy;
delete x;
delete y;
delete g;
delete dedx;
delete dedy;
delete xGPU;
delete yGPU;
delete gGPU;
delete dedxGPU;
delete dedyGPU;
delete[] sDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete x;
delete y;
delete g;
delete dedx;
delete dedy;
delete[] sDimSize;
return cpuTest;
#endif // USE_CUDA
}
/* other cases */
/*
TODO!!
*/
/* test for Identity Function */
extern "C"
bool TestIdentity()
{
XPRINT(0, stdout, "[TEST Identity] -------------\n");
bool returnFlag = true, caseFlag = true;
/* case 1 test */
caseFlag = TestIdentity1();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 1 failed!\n");
}
else
XPRINT(0, stdout, ">> case 1 passed!\n");
/* case 2 test */
caseFlag = TestIdentity2();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 2 failed!\n");
}
else
XPRINT(0, stdout, ">> case 2 passed!\n");
/* other cases test */
/*
TODO!!
*/
if (returnFlag) {
XPRINT(0, stdout, ">> All Passed!\n");
}
else
XPRINT(0, stdout, ">> Failed!\n");
XPRINT(0, stdout, "\n");
return returnFlag;
}
} // namespace nts(NiuTrans.Tensor)
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-29
*/
#ifndef __TEST_IDENTITY_H__
#define __TEST_IDENTITY_H__
#include "../function/Identity.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* test for Identity Function */
extern "C"
bool TestIdentity();
} // namespace nts(NiuTrans.Tensor)
#endif // __TEST_IDENTITY_H__
...@@ -19,7 +19,6 @@
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-15
*/
#include "../XTensor.h"
#include "TMatrixMULBatchedCPU.h"
...
...@@ -19,7 +19,6 @@
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-14
*/
#include "../XTensor.h"
#include "TMatrixMul.h"
...
...@@ -19,7 +19,6 @@
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-15
*/
#include "../XTensor.h"
#include "TMatrixMul2D.h"
...
...@@ -19,7 +19,6 @@
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-13
*/
#include "../XTensor.h"
#include "../XList.h"
#include "TMerge.h"
...
...@@ -75,7 +75,7 @@ bool TestPower1()
Power(aGPU, 2.0);
/* check results */
- gpuTest = aGPU->CheckData(answer, aUnitNum);
+ gpuTest = aGPU->CheckData(answer, aUnitNum, 0.0001F);
/* destroy variables */
delete a;
delete aGPU;
...
...@@ -26,9 +26,9 @@
#include "../core/ReduceSum.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1 */
bool TestReduceSum1()
{
/* a tensor of size 2 * 4 */
int order = 2;
int order_reduce = 1;
...@@ -121,10 +121,10 @@ namespace nts { // namespace nts(NiuTrans.Tensor)
delete b;
return cpuTest;
#endif // USE_CUDA
}
bool TestReduceSumForLargescale()
{
/* a tensor of size 10000 * 500 */
int order = 2;
int order_reduce = 1;
...@@ -211,17 +211,17 @@ namespace nts { // namespace nts(NiuTrans.Tensor)
delete b;
return cpuTest;
#endif // USE_CUDA
}
/* other cases */
/*
TODO!!
*/
/* test for ReduceSum Function */
extern "C"
bool TestReduceSum()
{
XPRINT(0, stdout, "[TEST ReduceSum]\n");
bool returnFlag = true, caseFlag = true;
...@@ -243,10 +243,10 @@ namespace nts { // namespace nts(NiuTrans.Tensor)
else
XPRINT(0, stdout, ">> case 2 passed!\n");
- ///* other cases test */
- ///*
- //TODO!!
- //*/
+ /* other cases test */
+ /*
+ TODO!!
+ */
if (returnFlag) {
XPRINT(0, stdout, ">> All Passed!\n");
...
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27
*/
#include "../XTensor.h"
#include "TReduceSumSquared.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1 */
bool TestReduceSumSquared1()
{
/* an input tensor of size 2 * 4 */
int inputOrder = 2;
int * inputDimSize = new int[inputOrder];
inputDimSize[0] = 2;
inputDimSize[1] = 4;
int inputUnitNum = 1;
for (int i = 0; i < inputOrder; i++)
inputUnitNum *= inputDimSize[i];
/* an output tensor of size 4 */
int outputOrder = 1;
int * outputDimSize = new int[outputOrder];
outputDimSize[0] = 4;
int outputUnitNum = 1;
for (int i = 0; i < outputOrder; i++)
outputUnitNum *= outputDimSize[i];
/* a shift tensor of size 4 */
int shiftOrder = 1;
int * shiftDimSize = new int[shiftOrder];
shiftDimSize[0] = 4;
int shiftUnitNum = 1;
for (int i = 0; i < shiftOrder; i++)
shiftUnitNum *= shiftDimSize[i];
DTYPE inputData[2][4] = { {0.0, 1.0, 2.0, 3.0},
{4.0, 5.0, 6.0, 7.0} };
DTYPE shiftData[4] = {1.0, -1.0, -1.0, 0.0};
DTYPE answer[4] = {10.0, 40.0, 58.0, 58.0};
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * input = NewTensor(inputOrder, inputDimSize);
XTensor * output = NewTensor(outputOrder, outputDimSize);
XTensor * shift = NewTensor(shiftOrder, shiftDimSize);
/* initialize variables */
input->SetData(inputData, inputUnitNum);
shift->SetData(shiftData, shiftUnitNum);
output->SetZeroAll();
/* call ReduceSumSquared function */
ReduceSumSquared(input, output, 0, shift);
/* check results */
cpuTest = output->CheckData(answer, outputUnitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
XTensor * inputGPU = NewTensor(inputOrder, inputDimSize, X_FLOAT, 1.0F, 0);
XTensor * outputGPU = NewTensor(outputOrder, outputDimSize, X_FLOAT, 1.0F, 0);
XTensor * shiftGPU = NewTensor(shiftOrder, shiftDimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
inputGPU->SetData(inputData, inputUnitNum);
shiftGPU->SetData(shiftData, shiftUnitNum);
outputGPU->SetZeroAll();
/* call ReduceSumSquared function */
ReduceSumSquared(inputGPU, outputGPU, 0, shiftGPU);
/* check results */
gpuTest = outputGPU->CheckData(answer, outputUnitNum);
/* destroy variables */
delete input;
delete output;
delete shift;
delete inputGPU;
delete outputGPU;
delete shiftGPU;
delete[] inputDimSize;
delete[] outputDimSize;
delete[] shiftDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete input;
delete output;
delete shift;
delete[] inputDimSize;
delete[] outputDimSize;
delete[] shiftDimSize;
return cpuTest;
#endif // USE_CUDA
}
/* other cases */
/*
TODO!!
*/
/* test for ReduceSumSquared Function */
extern "C"
bool TestReduceSumSquared()
{
XPRINT(0, stdout, "[TEST ReduceSumSquared]\n");
bool returnFlag = true, caseFlag = true;
/* case 1 test */
caseFlag = TestReduceSumSquared1();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 1 failed!\n");
}
else
XPRINT(0, stdout, ">> case 1 passed!\n");
/* other cases test */
/*
TODO!!
*/
if (returnFlag) {
XPRINT(0, stdout, ">> All Passed!\n");
}
else
XPRINT(0, stdout, ">> Failed!\n");
XPRINT(0, stdout, "\n");
return returnFlag;
}
} // namespace nts(NiuTrans.Tensor)
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27
*/
#ifndef __TEST_REDUCESUMSQUARED_H__
#define __TEST_REDUCESUMSQUARED_H__
#include "../core/ReduceSumSquared.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* test for ReduceSumSquared Function */
extern "C"
bool TestReduceSumSquared();
} // namespace nts(NiuTrans.Tensor)
#endif // __TEST_REDUCESUMSQUARED_H__
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27
*/
#include "../XTensor.h"
#include "TReduceVariance.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1 */
bool TestReduceVariance1()
{
/* an input tensor of size 2 * 4 */
int inputOrder = 2;
int * inputDimSize = new int[inputOrder];
inputDimSize[0] = 2;
inputDimSize[1] = 4;
int inputUnitNum = 1;
for (int i = 0; i < inputOrder; i++)
inputUnitNum *= inputDimSize[i];
/* an output tensor of size 4 */
int outputOrder = 1;
int * outputDimSize = new int[outputOrder];
outputDimSize[0] = 4;
int outputUnitNum = 1;
for (int i = 0; i < outputOrder; i++)
outputUnitNum *= outputDimSize[i];
/* a mean tensor of size 4 */
int meanOrder = 1;
int * meanDimSize = new int[meanOrder];
meanDimSize[0] = 4;
int meanUnitNum = 1;
for (int i = 0; i < meanOrder; i++)
meanUnitNum *= meanDimSize[i];
DTYPE inputData[2][4] = { {0.0, 1.0, 2.0, 3.0},
{4.0, 5.0, 6.0, 7.0} };
DTYPE meanData[4] = {2.0, 3.0, 4.0, 5.0};
DTYPE answer[4] = {4.0, 4.0, 4.0, 4.0};
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * input = NewTensor(inputOrder, inputDimSize);
XTensor * output = NewTensor(outputOrder, outputDimSize);
XTensor * mean = NewTensor(meanOrder, meanDimSize);
/* initialize variables */
input->SetData(inputData, inputUnitNum);
mean->SetData(meanData, meanUnitNum);
output->SetZeroAll();
/* call ReduceVariance function */
ReduceVariance(input, output, 0, mean);
/* check results */
cpuTest = output->CheckData(answer, outputUnitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
XTensor * inputGPU = NewTensor(inputOrder, inputDimSize, X_FLOAT, 1.0F, 0);
XTensor * outputGPU = NewTensor(outputOrder, outputDimSize, X_FLOAT, 1.0F, 0);
XTensor * meanGPU = NewTensor(meanOrder, meanDimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
inputGPU->SetData(inputData, inputUnitNum);
meanGPU->SetData(meanData, meanUnitNum);
outputGPU->SetZeroAll();
/* call ReduceVariance function */
ReduceVariance(inputGPU, outputGPU, 0, meanGPU);
/* check results */
gpuTest = outputGPU->CheckData(answer, outputUnitNum);
/* destroy variables */
delete input;
delete output;
delete mean;
delete inputGPU;
delete outputGPU;
delete meanGPU;
delete[] inputDimSize;
delete[] outputDimSize;
delete[] meanDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete input;
delete output;
delete mean;
delete[] inputDimSize;
delete[] outputDimSize;
delete[] meanDimSize;
return cpuTest;
#endif // USE_CUDA
}
/* other cases */
/*
TODO!!
*/
/* test for ReduceVariance Function */
extern "C"
bool TestReduceVariance()
{
XPRINT(0, stdout, "[TEST ReduceVariance]\n");
bool returnFlag = true, caseFlag = true;
/* case 1 test */
caseFlag = TestReduceVariance1();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 1 failed!\n");
}
else
XPRINT(0, stdout, ">> case 1 passed!\n");
/* other cases test */
/*
TODO!!
*/
if (returnFlag) {
XPRINT(0, stdout, ">> All Passed!\n");
}
else
XPRINT(0, stdout, ">> Failed!\n");
XPRINT(0, stdout, "\n");
return returnFlag;
}
} // namespace nts(NiuTrans.Tensor)
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27
*/
#ifndef __TEST_REDUCEVARIANCE_H__
#define __TEST_REDUCEVARIANCE_H__
#include "../core/ReduceVariance.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* test for ReduceVariance Function */
extern "C"
bool TestReduceVariance();
} // namespace nts(NiuTrans.Tensor)
#endif // __TEST_REDUCEVARIANCE_H__
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27
*/
#include "../XTensor.h"
#include "TScaleAndShift.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1 */
bool TestScaleAndShift1()
{
/* an input tensor of size 2 * 4 */
int inputOrder = 2;
int * inputDimSize = new int[inputOrder];
inputDimSize[0] = 2;
inputDimSize[1] = 4;
int inputUnitNum = 1;
for (int i = 0; i < inputOrder; i++)
inputUnitNum *= inputDimSize[i];
DTYPE inputData[2][4] = { {0.0, 1.0, 2.0, 3.0},
{4.0, 5.0, 6.0, 7.0} };
DTYPE answer[2][4] = { {0.5, 2.5, 4.5, 6.5},
{8.5, 10.5, 12.5, 14.5} };
DTYPE scaleFactor = 2.0;
DTYPE shiftFactor = 0.5;
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * input = NewTensor(inputOrder, inputDimSize);
/* initialize variables */
input->SetData(inputData, inputUnitNum);
/* call ScaleAndShift function */
ScaleAndShift(input, scaleFactor, shiftFactor);
/* check results */
cpuTest = input->CheckData(answer, inputUnitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
XTensor * inputGPU = NewTensor(inputOrder, inputDimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
inputGPU->SetData(inputData, inputUnitNum);
/* call ScaleAndShift function */
ScaleAndShift(inputGPU, scaleFactor, shiftFactor);
/* check results */
gpuTest = inputGPU->CheckData(answer, inputUnitNum);
/* destroy variables */
delete input;
delete inputGPU;
delete[] inputDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete input;
delete[] inputDimSize;
return cpuTest;
#endif // USE_CUDA
}
/* other cases */
/*
TODO!!
*/
/* test for ScaleAndShift Function */
extern "C"
bool TestScaleAndShift()
{
XPRINT(0, stdout, "[TEST ScaleAndShift]\n");
bool returnFlag = true, caseFlag = true;
/* case 1 test */
caseFlag = TestScaleAndShift1();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 1 failed!\n");
}
else
XPRINT(0, stdout, ">> case 1 passed!\n");
/* other cases test */
/*
TODO!!
*/
if (returnFlag) {
XPRINT(0, stdout, ">> All Passed!\n");
}
else
XPRINT(0, stdout, ">> Failed!\n");
XPRINT(0, stdout, "\n");
return returnFlag;
}
} // namespace nts(NiuTrans.Tensor)
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27
*/
#ifndef __TEST_SCALEANDSHIFT_H__
#define __TEST_SCALEANDSHIFT_H__
#include "../core/ScaleAndShift.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* test for ScaleAndShift Function */
extern "C"
bool TestScaleAndShift();
} // namespace nts(NiuTrans.Tensor)
#endif // __TEST_SCALEANDSHIFT_H__
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27
*/
#include "TSelect.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1: test SelectRange function.
* It generates a tensor with the data selected
* in the range [low, high) along the given dimension.
*/
bool TestSelect1()
{
/* an input tensor of size (2, 2, 4) */
int sOrder = 3;
int * sDimSize = new int[sOrder];
sDimSize[0] = 2;
sDimSize[1] = 2;
sDimSize[2] = 4;
int sUnitNum = 1;
for (int i = 0; i < sOrder; i++)
sUnitNum *= sDimSize[i];
/* an output tensor of size (2, 1, 4) */
int tOrder = 3;
int * tDimSize = new int[tOrder];
tDimSize[0] = 2;
tDimSize[1] = 1;
tDimSize[2] = 4;
int tUnitNum = 1;
for (int i = 0; i < tOrder; i++)
tUnitNum *= tDimSize[i];
DTYPE sData[2][2][4] = { { {0.0, 1.0, 2.0, 3.0},
{4.0, 5.0, 6.0, 7.0} },
{ {1.0, 2.0, 3.0, 4.0},
{5.0, 6.0, 7.0, 8.0} } };
DTYPE answer[2][1][4] = { { {4.0, 5.0, 6.0, 7.0} },
{ {5.0, 6.0, 7.0, 8.0} } };
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * s = NewTensor(sOrder, sDimSize);
XTensor * t = NewTensor(tOrder, tDimSize);
/* initialize variables */
s->SetData(sData, sUnitNum);
t->SetZeroAll();
/* call SelectRange function */
SelectRange(s, 1, 1, 2, t);
/* check results */
cpuTest = t->CheckData(answer, tUnitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
sGPU->SetData(sData, sUnitNum);
tGPU->SetZeroAll();
/* call SelectRange function */
SelectRange(sGPU, 1, 1, 2, tGPU);
/* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum);
/* destroy variables */
delete s;
delete t;
delete sGPU;
delete tGPU;
delete[] sDimSize;
delete[] tDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete s;
delete t;
delete[] sDimSize;
delete[] tDimSize;
return cpuTest;
#endif // USE_CUDA
}
/* other cases */
/*
TODO!!
*/
/* test for Select Function */
extern "C"
bool TestSelect()
{
XPRINT(0, stdout, "[TEST Select] scale and shift all tensor entires\n");
bool returnFlag = true, caseFlag = true;
/* case 1 test */
caseFlag = TestSelect1();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 1 failed!\n");
}
else
XPRINT(0, stdout, ">> case 1 passed!\n");
/* other cases test */
/*
TODO!!
*/
if (returnFlag) {
XPRINT(0, stdout, ">> All Passed!\n");
}
else
XPRINT(0, stdout, ">> Failed!\n");
XPRINT(0, stdout, "\n");
return returnFlag;
}
} // namespace nts(NiuTrans.Tensor)
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27
*/
#ifndef __TEST_SELECT_H__
#define __TEST_SELECT_H__
#include "../core/Select.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* test for Select Function */
extern "C"
bool TestSelect();
} // namespace nts(NiuTrans.Tensor)
#endif // __TEST_SELECT_H__
...@@ -19,7 +19,6 @@
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-19
*/
#include "../XTensor.h"
#include "../XUtility.h"
#include "TSigmoid.h"
...
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27
*/
#include "../XTensor.h"
#include "TTopK.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1 */
bool TestTopK1()
{
/* an input tensor of size 2 * 4 */
int inputOrder = 2;
int * inputDimSize = new int[inputOrder];
inputDimSize[0] = 2;
inputDimSize[1] = 4;
int inputUnitNum = 1;
for (int i = 0; i < inputOrder; i++)
inputUnitNum *= inputDimSize[i];
/* an output tensor of size 2 * 4 */
int outputOrder = 2;
int * outputDimSize = new int[outputOrder];
outputDimSize[0] = 2;
outputDimSize[1] = 4;
int outputUnitNum = 1;
for (int i = 0; i < outputOrder; i++)
outputUnitNum *= outputDimSize[i];
DTYPE inputData[2][4] = { {5.0, 1.0, 2.0, 8.0},
{4.0, 3.0, 7.0, 6.0} };
DTYPE outputAnswerA[2][4] = { {5.0, 3.0, 7.0, 8.0},
{4.0, 1.0, 2.0, 6.0} };
int indexAnswerA[2][4] = { {0, 1, 1, 0},
{1, 0, 0, 1} };
DTYPE outputAnswerB[2][4] = { {8.0, 5.0, 2.0, 1.0},
{7.0, 6.0, 4.0, 3.0} };
int indexAnswerB[2][4] = { {3, 0, 2, 1},
{2, 3, 0, 1} };
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * input = NewTensor(inputOrder, inputDimSize);
XTensor * outputA = NewTensor(outputOrder, outputDimSize);
XTensor * outputB = NewTensor(outputOrder, outputDimSize);
XTensor * indexA = NewTensor(outputOrder, outputDimSize, X_INT);
XTensor * indexB = NewTensor(outputOrder, outputDimSize, X_INT);
/* initialize variables */
input->SetData(inputData, inputUnitNum);
outputA->SetZeroAll();
outputB->SetZeroAll();
indexA->SetZeroAll();
indexB->SetZeroAll();
/* call TopK function */
int dim = 0;
int k = inputDimSize[dim];
TopK(input, outputA, indexA, dim, k);
dim = 1;
k = inputDimSize[dim];
TopK(input, outputB, indexB, dim, k);
/* debug: dump the CPU results
{
printf("CPU output data:\n");
DTYPE * checkData = (DTYPE*)outputA->data;
for (int i = 0; i < outputA->unitNum; i++)
printf("%.2f\t", *checkData++);
printf("\n");
}
{
printf("CPU index data:\n");
int * checkIndex = (int*)indexA->data;
for (int i = 0; i < indexA->unitNum; i++)
printf("%d\t", *checkIndex++);
printf("\n");
}
*/
/* check results */
cpuTest = outputA->CheckData(outputAnswerA, outputUnitNum) &&
outputB->CheckData(outputAnswerB, outputUnitNum) &&
indexA->CheckData(indexAnswerA, outputUnitNum) &&
indexB->CheckData(indexAnswerB, outputUnitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
XTensor * inputGPU = NewTensor(inputOrder, inputDimSize, X_FLOAT, 1.0F, 0);
XTensor * outputGPUA = NewTensor(outputOrder, outputDimSize, X_FLOAT, 1.0F, 0);
XTensor * outputGPUB = NewTensor(outputOrder, outputDimSize, X_FLOAT, 1.0F, 0);
XTensor * indexGPUA = NewTensor(outputOrder, outputDimSize, X_INT, 1.0F, 0);
XTensor * indexGPUB = NewTensor(outputOrder, outputDimSize, X_INT, 1.0F, 0);
/* initialize variables */
inputGPU->SetData(inputData, inputUnitNum);
outputGPUA->SetZeroAll();
outputGPUB->SetZeroAll();
indexGPUA->SetZeroAll();
indexGPUB->SetZeroAll();
/* call TopK function */
dim = 0;
k = inputDimSize[dim];
TopK(inputGPU, outputGPUA, indexGPUA, dim, k);
dim = 1;
k = inputDimSize[dim];
TopK(inputGPU, outputGPUB, indexGPUB, dim, k);
/* check results */
gpuTest = outputGPUA->CheckData(outputAnswerA, outputUnitNum) &&
outputGPUB->CheckData(outputAnswerB, outputUnitNum) &&
indexGPUA->CheckData(indexAnswerA, outputUnitNum) &&
indexGPUB->CheckData(indexAnswerB, outputUnitNum);
/* destroy variables */
delete input;
delete outputA;
delete outputB;
delete indexA;
delete indexB;
delete inputGPU;
delete outputGPUA;
delete outputGPUB;
delete indexGPUA;
delete indexGPUB;
delete[] inputDimSize;
delete[] outputDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete input;
delete outputA;
delete outputB;
delete indexA;
delete indexB;
delete[] inputDimSize;
delete[] outputDimSize;
return cpuTest;
#endif // USE_CUDA
}
/* other cases */
/*
TODO!!
*/
/* test for TopK Function */
extern "C"
bool TestTopK()
{
XPRINT(0, stdout, "[TEST TopK]\n");
bool returnFlag = true, caseFlag = true;
/* case 1 test */
caseFlag = TestTopK1();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 1 failed!\n");
}
else
XPRINT(0, stdout, ">> case 1 passed!\n");
/* other cases test */
/*
TODO!!
*/
if (returnFlag) {
XPRINT(0, stdout, ">> All Passed!\n");
}
else
XPRINT(0, stdout, ">> Failed!\n");
XPRINT(0, stdout, "\n");
return returnFlag;
}
} // namespace nts(NiuTrans.Tensor)
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27
*/
#ifndef __TEST_TOPK_H__
#define __TEST_TOPK_H__
#include "../core/TopK.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* test for TopK Function */
extern "C"
bool TestTopK();
} // namespace nts(NiuTrans.Tensor)
#endif // __TEST_TOPK_H__
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-13
*/
#include "../XTensor.h"
#include "../core/Unsqueeze.h"
#include "../XList.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1: insert a dimension by copying the blocks x times
* (where x is the size of the inserted dimension).
* In this case, (2, 3) -> (2, 3, 2), dim = 2, dSize = 2.
*/
bool TestUnsqueeze1()
{
/* a source tensor of size (2, 3) */
int sOrder = 2;
int * sDimSize = new int[sOrder];
sDimSize[0] = 2;
sDimSize[1] = 3;
int sUnitNum = 1;
for (int i = 0; i < sOrder; i++)
sUnitNum *= sDimSize[i];
/* a target tensor of size (2, 3, 2) */
int tOrder = 3;
int * tDimSize = new int[tOrder];
tDimSize[0] = 2;
tDimSize[1] = 3;
tDimSize[2] = 2;
int tUnitNum = 1;
for (int i = 0; i < tOrder; i++)
tUnitNum *= tDimSize[i];
DTYPE sData[2][3] = { {0.0, 1.0, 2.0},
{3.0, 4.0, 5.0} };
DTYPE answer[2][2][3] = { { {0.0, 1.0, 2.0},
{3.0, 4.0, 5.0} },
{ {0.0, 1.0, 2.0},
{3.0, 4.0, 5.0} } };
DTYPE new_answer[2][3][2] = { { {0.0, 0.0},
{1.0, 1.0},
{2.0, 2.0} },
{ {3.0, 3.0},
{4.0, 4.0},
{5.0, 5.0} } };
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * s = NewTensor(sOrder, sDimSize);
XTensor * t = NewTensor(tOrder, tDimSize);
/* initialize variables */
s->SetData(sData, sUnitNum);
t->SetZeroAll();
/* call Unsqueeze function */
Unsqueeze(s, t, 2, 2);
/* check results */
cpuTest = t->CheckData(new_answer, tUnitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensor */
XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
/* Initialize variables */
sGPU->SetData(sData, sUnitNum);
tGPU->SetZeroAll();
/* call Unsqueeze function */
Unsqueeze(sGPU, tGPU, 2, 2);
/* check results */
gpuTest = tGPU->CheckData(new_answer, tUnitNum);
/* destroy variables */
delete s;
delete t;
delete sGPU;
delete tGPU;
delete[] sDimSize;
delete[] tDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete s;
delete t;
delete[] sDimSize;
delete[] tDimSize;
return cpuTest;
#endif // USE_CUDA
}
/* other cases */
/*
TODO!!
*/
/* test for Unsqueeze Function */
extern "C"
bool TestUnsqueeze()
{
XPRINT(0, stdout, "[TEST Unsqueeze] -------------\n");
bool returnFlag = true, caseFlag = true;
/* case 1 test */
caseFlag = TestUnsqueeze1();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 1 failed!\n");
}
else
XPRINT(0, stdout, ">> case 1 passed!\n");
/* other cases test */
/*
TODO!!
*/
if (returnFlag) {
XPRINT(0, stdout, ">> All Passed!\n");
}
else
XPRINT(0, stdout, ">> Failed!\n");
XPRINT(0, stdout, "\n");
return returnFlag;
}
} // namespace nts(NiuTrans.Tensor)
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27
*/
#ifndef __TEST_UNSQUEEZE_H__
#define __TEST_UNSQUEEZE_H__
#include "../core/Unsqueeze.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* test for Unsqueeze Function */
extern "C"
bool TestUnsqueeze();
} // namespace nts(NiuTrans.Tensor)
#endif // __TEST_UNSQUEEZE_H__
...@@ -38,7 +38,7 @@ bool Test()
wrong = !TestMultiplyElementWise() || wrong;
wrong = !TestNegate() || wrong;
wrong = !TestNormalize() || wrong;
- //wrong = !TestPower() || wrong;
+ wrong = !TestPower() || wrong;
wrong = !TestReduceMax() || wrong;
wrong = !TestReduceMean() || wrong;
wrong = !TestReduceSum() || wrong;
...