Commit 53d88872 by liyinqiao

Reorganize the code

1. Move the New/Init/Del tensor functions from XTensor.* to XElement.*
2. Move IsSameShaped functions from XTensor.* to core/shape/IsSameShaped.*
3. Move CheckData functions from XTensor.* to core/utilities/CheckData.*
parent 06e98a98
...@@ -381,7 +381,7 @@ void XMathGrad::GradDiv(XTensor * node, bool isEfficient) ...@@ -381,7 +381,7 @@ void XMathGrad::GradDiv(XTensor * node, bool isEfficient)
XNoder::MakeGrad(a); XNoder::MakeGrad(a);
XNoder::MakeGrad(b); XNoder::MakeGrad(b);
CheckNTErrors(XTensor::IsSameShaped(a, b), "Wrong sized input tensors!"); CheckNTErrors(IsSameShaped(a, b), "Wrong sized input tensors!");
_Div(node->grad, b, a->grad, 1.0F); _Div(node->grad, b, a->grad, 1.0F);
...@@ -726,7 +726,7 @@ void XMathGrad::GradMultiply(XTensor * node, bool isEfficient) ...@@ -726,7 +726,7 @@ void XMathGrad::GradMultiply(XTensor * node, bool isEfficient)
XTensor * a = income.tails[0]; XTensor * a = income.tails[0];
XTensor * b = income.tails[1]; XTensor * b = income.tails[1];
CheckNTErrors(XTensor::IsSameShaped(a, b), "Wrong sized input tensors!"); CheckNTErrors(IsSameShaped(a, b), "Wrong sized input tensors!");
if (!isEfficient || a->isGrad) { if (!isEfficient || a->isGrad) {
XNoder::MakeGrad(a); XNoder::MakeGrad(a);
......
...@@ -281,7 +281,7 @@ void XShapeGrad::GradMergeList(XTensor * node, bool isEfficient) ...@@ -281,7 +281,7 @@ void XShapeGrad::GradMergeList(XTensor * node, bool isEfficient)
smallsGrad.Add(tail->grad); smallsGrad.Add(tail->grad);
if(i > 1){ if(i > 1){
CheckNTErrors(XTensor::IsSameShaped(last, tail), CheckNTErrors(IsSameShaped(last, tail),
"Input tensors must be of the same size!"); "Input tensors must be of the same size!");
} }
......
...@@ -29,7 +29,7 @@ void XNoder::MakeGrad(XTensor * node) ...@@ -29,7 +29,7 @@ void XNoder::MakeGrad(XTensor * node)
if(node == NULL) if(node == NULL)
return; return;
if(!XTensor::IsSameShaped(node, node->grad)){ if(!IsSameShaped(node, node->grad)){
delete node->grad; delete node->grad;
node->grad = NewTensor(node); node->grad = NewTensor(node);
node->grad->SetZeroAll(); node->grad->SetZeroAll();
......
...@@ -20,7 +20,7 @@ ...@@ -20,7 +20,7 @@
* $Created by: XIAO Tong (xiaotong@mail.neu.edu.cn) 2018-07-18 * $Created by: XIAO Tong (xiaotong@mail.neu.edu.cn) 2018-07-18
*/ */
#include "../tensor/XTensor.h" #include "../tensor/core/CHeader.h"
#ifndef __XNODER_H__ #ifndef __XNODER_H__
#define __XNODER_H__ #define __XNODER_H__
......
...@@ -319,7 +319,7 @@ void T2TSearch::Generate(T2TStateBundle * beam) ...@@ -319,7 +319,7 @@ void T2TSearch::Generate(T2TStateBundle * beam)
for (int i = 0; i < indexGPU.unitNum; i++) for (int i = 0; i < indexGPU.unitNum; i++)
indexGPU.SetInt(i * stride + indexGPU.GetInt(i), i); indexGPU.SetInt(i * stride + indexGPU.GetInt(i), i);
CheckNTErrors(XTensor::IsSameShaped(&prob, &probPath), "Wrong tensor shape!"); CheckNTErrors(IsSameShaped(&prob, &probPath), "Wrong tensor shape!");
/* sequence probability of top-k candidates */ /* sequence probability of top-k candidates */
XTensor probPathTopK; XTensor probPathTopK;
......
/* NiuTrans.Tensor - an open-source tensor library
 * Copyright (C) 2017, Natural Language Processing Lab, Northestern University.
 * All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * $Created by: LI Yinqiao (email: li.yin.qiao.2012@hotmail.com) 2019-10-21
 */
#ifndef __XELEMENT_H__
#define __XELEMENT_H__
#include "XTensor.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/*
 * we define the "new and delete" functions below:
 * - InitTensor* routines fill in an already-constructed XTensor in place;
 * - NewTensor*  routines allocate and return a fresh XTensor (caller owns it,
 *   release with DelTensor / DelTensorBuf for the *Buf variants);
 * - the V2 variants take no XMem memory-pool argument and instead accept an
 *   isEnableGrad flag (default true).
 */
/* initialize a XTensor (general form: arbitrary order, per-dimension sizes,
   optional density ratio and memory pool) */
void InitTensor(XTensor * tensor,
const int myOrder, const int * myDimSize, const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const float myDenseRatio = 1.0F, const int myDevID = -1, XMem * myMem = NULL);
/* initialize a dense XTensor V2 (no memory pool; isEnableGrad instead) */
void InitTensorV2(XTensor * tensor,
const int myOrder, const int * myDimSize, const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, const bool isEnableGrad = true);
/* initialize a dense vector (1d) */
void InitTensor1D(XTensor * tensor, const int num,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, XMem * myMem = NULL);
/* initialize a dense vector V2 */
void InitTensor1DV2(XTensor * tensor, const int num,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, const bool isEnableGrad = true);
/* initialize a dense matrix (2d: rowNum x colNum) */
void InitTensor2D(XTensor * tensor, const int rowNum, const int colNum,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, XMem * myMem = NULL);
/* initialize a dense matrix V2 */
void InitTensor2DV2(XTensor * tensor, const int rowNum, const int colNum,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, const bool isEnableGrad = true);
/* initialize a dense 3d tensor */
void InitTensor3D(XTensor * tensor, const int d0, const int d1, const int d2,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, XMem * myMem = NULL);
/* initialize a dense 3d tensor V2 */
void InitTensor3DV2(XTensor * tensor, const int d0, const int d1, const int d2,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, const bool isEnableGrad = true);
/* initialize a dense 4d tensor */
void InitTensor4D(XTensor * tensor, const int d0, const int d1, const int d2, const int d3,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, XMem * myMem = NULL);
/* initialize a dense 4d tensor V2 */
void InitTensor4DV2(XTensor * tensor, const int d0, const int d1, const int d2, const int d3,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, const bool isEnableGrad = true);
/* initialize a dense 5d tensor */
void InitTensor5D(XTensor * tensor, const int d0, const int d1, const int d2, const int d3, const int d4,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, XMem * myMem = NULL);
/* initialize a dense 5d tensor V2 */
void InitTensor5DV2(XTensor * tensor, const int d0, const int d1, const int d2, const int d3, const int d4,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, const bool isEnableGrad = true);
/* initialize a tensor with a reference tensor (copies shape/type from reference) */
void InitTensor(XTensor * tensor, const XTensor * reference);
/* initialize a tensor with a reference tensor (V2 variant) */
void InitTensorV2(XTensor * tensor, const XTensor * reference);
/* initialize a tensor on the CPU with a reference tensor */
void InitTensorOnCPU(XTensor * tensor, const XTensor * reference);
/* generate a XTensor with no initialization */
XTensor * NewTensor();
/* generate a XTensor (general form; see InitTensor for parameter meanings) */
XTensor * NewTensor(const int myOrder, const int * myDimSize, const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const float myDenseRatio = 1.0F, const int myDevID = -1, XMem * myMem = NULL);
/* generate a dense XTensor V2 */
XTensor * NewTensorV2(const int myOrder, const int * myDimSize, const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, const bool isEnableGrad = true);
/* generate a XTensor which allocates data on the buffer
   (release with DelTensorBuf, not DelTensor) */
XTensor * NewTensorBuf(const int myOrder, const int * myDimSize,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const float myDenseRatio = 1.0F,
const int myDevID = -1, XMem * myMem = NULL);
/* generate a dense XTensor which allocates data on the buffer V2 */
XTensor * NewTensorBufV2(const int myOrder, const int * myDimSize,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, const bool isEnableGrad = true);
/* generate a XTensor which allocates data on the buffer, shaped like a reference tensor */
XTensor * NewTensorBuf(const XTensor * reference, int devID, XMem * myMem);
/* generate a XTensor which allocates data on the buffer V2 (reference-shaped) */
XTensor * NewTensorBufV2(const XTensor * reference, int devID, const bool isEnableGrad = true);
/* generate a dense vector (1d) */
XTensor * NewTensor1D(const int num, const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1,
XMem * myMem = NULL);
/* generate a dense vector V2 */
XTensor * NewTensor1DV2(const int num, const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, const bool isEnableGrad = true);
/* generate a dense matrix (2d) */
XTensor * NewTensor2D(const int rowNum, const int colNum,
const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, XMem * myMem = NULL);
/* generate a dense matrix V2 */
XTensor * NewTensor2DV2(const int rowNum, const int colNum,
const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, const bool isEnableGrad = true);
/* generate a dense 3d tensor */
XTensor * NewTensor3D(const int d0, const int d1, const int d2,
const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, XMem * myMem = NULL);
/* generate a dense 3d tensor V2 */
XTensor * NewTensor3DV2(const int d0, const int d1, const int d2,
const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, const bool isEnableGrad = true);
/* generate a dense 4d tensor */
XTensor * NewTensor4D(const int d0, const int d1, const int d2, const int d3,
const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, XMem * myMem = NULL);
/* generate a dense 4d tensor V2 */
XTensor * NewTensor4DV2(const int d0, const int d1, const int d2, const int d3,
const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, const bool isEnableGrad = true);
/* generate a dense 5d tensor */
XTensor * NewTensor5D(const int d0, const int d1, const int d2, const int d3, const int d4,
const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, XMem * myMem = NULL);
/* generate a dense 5d tensor V2 */
XTensor * NewTensor5DV2(const int d0, const int d1, const int d2, const int d3, const int d4,
const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, const bool isEnableGrad = true);
/* generate a dense vector by range [lower, upper) with the given step
   (note: default data type is X_INT here, unlike the other factories) */
XTensor * NewTensorRange(int lower, int upper, int step, const TENSOR_DATA_TYPE myDataType = X_INT, const int myDevID = -1, const bool isEnableGrad = true);
/* generate a copy of XTensor (with a reference to a given tensor);
   if isFilledData is false, only the shape/metadata is replicated */
XTensor * NewTensor(const XTensor * a, bool isFilledData = true);
/* free the data space of a given tensor (pairs with NewTensor*) */
void DelTensor(XTensor * tensor);
/* free the data space of a given tensor (on the buffer; pairs with NewTensorBuf*) */
void DelTensorBuf(XTensor * tensor);
} // namespace nts(NiuTrans.Tensor)
#endif // __XELEMENT_H__
\ No newline at end of file
...@@ -37,6 +37,7 @@ ...@@ -37,6 +37,7 @@
#include "XDataType.h" #include "XDataType.h"
#include "XMem.h" #include "XMem.h"
#include "XLink.h" #include "XLink.h"
#include "XElement.h"
/* the nts (NiuTrans.Tensor) namespace */ /* the nts (NiuTrans.Tensor) namespace */
namespace nts{ namespace nts{
...@@ -253,14 +254,6 @@ public: ...@@ -253,14 +254,6 @@ public:
/* relocate the data on the target device */ /* relocate the data on the target device */
void SetDevice(int myDevId, XMem * myMem = NULL); void SetDevice(int myDevId, XMem * myMem = NULL);
/* judge whether the two matrices are in the same type and size */
static
bool IsSameShaped(const XTensor * a, const XTensor * b);
/* judge whether the three matrices are in the same type and size */
static
bool IsSameShaped(const XTensor * a, const XTensor * b, const XTensor * c);
/* judge whether b is the reduced shape of a ?? */ /* judge whether b is the reduced shape of a ?? */
static static
bool IsReduceShaped(const XTensor * a, const XTensor * b, int dim); bool IsReduceShaped(const XTensor * a, const XTensor * b, int dim);
...@@ -324,12 +317,6 @@ public: ...@@ -324,12 +317,6 @@ public:
/* set tensor items with an array of values */ /* set tensor items with an array of values */
void SetDataBatchedWithValues(MTYPE * offsets, void * values, int num); void SetDataBatchedWithValues(MTYPE * offsets, void * values, int num);
/* check whether the data array is the same as the answer */
bool CheckData(const void * answer, int num, int beg = 0) const;
/* check whether the data array is the same as the answer */
bool CheckData(const void * answer, int num, float tolerance, int beg = 0) const;
/* set the pointer to "data" */ /* set the pointer to "data" */
void SetDataPointer(); void SetDataPointer();
...@@ -463,153 +450,6 @@ extern MUTEX_HANDLE tensorMutex; ...@@ -463,153 +450,6 @@ extern MUTEX_HANDLE tensorMutex;
extern XTensor NULLTensor; extern XTensor NULLTensor;
extern int MakeTensorID(); extern int MakeTensorID();
/************************************************
* we define the "new and delete" functions below
*/
/* initialize a XTensor */
void InitTensor(XTensor * tensor,
const int myOrder, const int * myDimSize, const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const float myDenseRatio = 1.0F, const int myDevID = -1, XMem * myMem = NULL);
/* initialize a dense XTensor V2 */
void InitTensorV2(XTensor * tensor,
const int myOrder, const int * myDimSize, const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, const bool isEnableGrad = true);
/* initialize a dense vector */
void InitTensor1D(XTensor * tensor, const int num,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, XMem * myMem = NULL);
/* initialize a dense vector V2 */
void InitTensor1DV2(XTensor * tensor, const int num,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, const bool isEnableGrad = true);
/* initialize a dense matrix */
void InitTensor2D(XTensor * tensor, const int rowNum, const int colNum,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, XMem * myMem = NULL);
/* initialize a dense matrix V2 */
void InitTensor2DV2(XTensor * tensor, const int rowNum, const int colNum,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, const bool isEnableGrad = true);
/* initialize a dense 3d tensor */
void InitTensor3D(XTensor * tensor, const int d0, const int d1, const int d2,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, XMem * myMem = NULL);
/* initialize a dense 3d tensor V2 */
void InitTensor3DV2(XTensor * tensor, const int d0, const int d1, const int d2,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, const bool isEnableGrad = true);
/* initialize a dense 4d tensor */
void InitTensor4D(XTensor * tensor, const int d0, const int d1, const int d2, const int d3,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, XMem * myMem = NULL);
/* initialize a dense 4d tensor V2 */
void InitTensor4DV2(XTensor * tensor, const int d0, const int d1, const int d2, const int d3,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, const bool isEnableGrad = true);
/* initialize a dense 5d tensor */
void InitTensor5D(XTensor * tensor, const int d0, const int d1, const int d2, const int d3, const int d4,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, XMem * myMem = NULL);
/* initialize a dense 5d tensor V2 */
void InitTensor5DV2(XTensor * tensor, const int d0, const int d1, const int d2, const int d3, const int d4,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, const bool isEnableGrad = true);
/* initialize a tensor with a reference tensor */
void InitTensor(XTensor * tensor, const XTensor * reference);
/* initialize a tensor with a reference tensor */
void InitTensorV2(XTensor * tensor, const XTensor * reference);
/* initialize a tensor on the CPU with a reference tensor */
void InitTensorOnCPU(XTensor * tensor, const XTensor * reference);
/* generate a XTensor with no initialization */
XTensor * NewTensor();
/* generate a XTensor */
XTensor * NewTensor(const int myOrder, const int * myDimSize, const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const float myDenseRatio = 1.0F, const int myDevID = -1, XMem * myMem = NULL);
/* generate a dense XTensor V2 */
XTensor * NewTensorV2(const int myOrder, const int * myDimSize, const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, const bool isEnableGrad = true);
/* generate a XTensor which allocates data on the buffer */
XTensor * NewTensorBuf(const int myOrder, const int * myDimSize,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const float myDenseRatio = 1.0F,
const int myDevID = -1, XMem * myMem = NULL);
/* generate a dense XTensor which allocates data on the buffer V2 */
XTensor * NewTensorBufV2(const int myOrder, const int * myDimSize,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, const bool isEnableGrad = true);
/* generate a XTensor which allocates data on the buffer */
XTensor * NewTensorBuf(const XTensor * reference, int devID, XMem * myMem);
/* generate a XTensor which allocates data on the buffer V2 */
XTensor * NewTensorBufV2(const XTensor * reference, int devID, const bool isEnableGrad = true);
/* generate a dense vector */
XTensor * NewTensor1D(const int num, const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1,
XMem * myMem = NULL);
/* generate a dense vector V2 */
XTensor * NewTensor1DV2(const int num, const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, const bool isEnableGrad = true);
/* generate a dense matrix */
XTensor * NewTensor2D(const int rowNum, const int colNum,
const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, XMem * myMem = NULL);
/* generate a dense matrix V2 */
XTensor * NewTensor2DV2(const int rowNum, const int colNum,
const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, const bool isEnableGrad = true);
/* generate a dense 3d tensor */
XTensor * NewTensor3D(const int d0, const int d1, const int d2,
const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, XMem * myMem = NULL);
/* generate a dense 3d tensor V2 */
XTensor * NewTensor3DV2(const int d0, const int d1, const int d2,
const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, const bool isEnableGrad = true);
/* generate a dense 4d tensor */
XTensor * NewTensor4D(const int d0, const int d1, const int d2, const int d3,
const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, XMem * myMem = NULL);
/* generate a dense 4d tensor V2 */
XTensor * NewTensor4DV2(const int d0, const int d1, const int d2, const int d3,
const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, const bool isEnableGrad = true);
/* generate a dense 5d tensor */
XTensor * NewTensor5D(const int d0, const int d1, const int d2, const int d3, const int d4,
const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, XMem * myMem = NULL);
/* generate a dense 5d tensor V2 */
XTensor * NewTensor5DV2(const int d0, const int d1, const int d2, const int d3, const int d4,
const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, const bool isEnableGrad = true);
/* generate a dense vector by range */
XTensor * NewTensorRange(int lower, int upper, int step, const TENSOR_DATA_TYPE myDataType = X_INT, const int myDevID = -1, const bool isEnableGrad = true);
/* generate a copy of XTensor (with a reference to a given tensor) */
XTensor * NewTensor(const XTensor * a, bool isFilledData = true);
/* free the data space of a given tensor */
void DelTensor(XTensor * tensor);
/* free the data space of a given tensor (on the buffer) */
void DelTensorBuf(XTensor * tensor);
/* overloading of the plus-sign */ /* overloading of the plus-sign */
XTensor operator+ (const DTYPE shift, const XTensor &tensor); XTensor operator+ (const DTYPE shift, const XTensor &tensor);
......
...@@ -85,6 +85,7 @@ ...@@ -85,6 +85,7 @@
#include "shape/Squeeze.h" #include "shape/Squeeze.h"
#include "shape/Transpose.h" #include "shape/Transpose.h"
#include "shape/Unsqueeze.h" #include "shape/Unsqueeze.h"
#include "shape/IsSameShaped.h"
#include "sort/Sort.h" #include "sort/Sort.h"
#include "sort/TopK.h" #include "sort/TopK.h"
......
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
#include "../../XTensor.h" #include "../../XTensor.h"
#include "../../XName.h" #include "../../XName.h"
#include "../../XUtility.h" #include "../../XUtility.h"
#include "../shape/IsSameShaped.h"
#include "Div.h" #include "Div.h"
#include "Div.cuh" #include "Div.cuh"
#include "DivDim.h" #include "DivDim.h"
...@@ -168,7 +169,7 @@ int GetDivDimIndex(const XTensor &a, const XTensor &b) ...@@ -168,7 +169,7 @@ int GetDivDimIndex(const XTensor &a, const XTensor &b)
{ {
if(a.order < b.order) if(a.order < b.order)
return -1; return -1;
if(XTensor::IsSameShaped(&a, &b)) if(IsSameShaped(&a, &b))
return -1; return -1;
int hitCount = 0; int hitCount = 0;
...@@ -253,7 +254,7 @@ where i is the index of the item ...@@ -253,7 +254,7 @@ where i is the index of the item
*/ */
void Div(const XTensor &a, const XTensor &b, XTensor &c, DTYPE alpha, int leadingDim) void Div(const XTensor &a, const XTensor &b, XTensor &c, DTYPE alpha, int leadingDim)
{ {
if (!c.isInit || !XTensor::IsSameShaped(&a, &c)) { if (!c.isInit || !IsSameShaped(&a, &c)) {
InitTensor(&c, &a); InitTensor(&c, &a);
} }
......
...@@ -26,6 +26,7 @@ ...@@ -26,6 +26,7 @@
#include "../../XName.h" #include "../../XName.h"
#include "../../XUtility.h" #include "../../XUtility.h"
#include "../movement/CopyValues.h" #include "../movement/CopyValues.h"
#include "../shape/IsSameShaped.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -56,7 +57,7 @@ void _DivDim(const XTensor * a, const XTensor * b, XTensor * c, int n, DTYPE alp ...@@ -56,7 +57,7 @@ void _DivDim(const XTensor * a, const XTensor * b, XTensor * c, int n, DTYPE alp
CheckDev(a->devID, b->devID); CheckDev(a->devID, b->devID);
if(XTensor::IsSameShaped(a, b)){ if(IsSameShaped(a, b)){
_Div(a, b, c, alpha); _Div(a, b, c, alpha);
return; return;
} }
...@@ -188,7 +189,7 @@ i.e., a is divided with b by broadcasting ...@@ -188,7 +189,7 @@ i.e., a is divided with b by broadcasting
*/ */
void DivDim(const XTensor &a, const XTensor &b, XTensor &c, int n, DTYPE alpha) void DivDim(const XTensor &a, const XTensor &b, XTensor &c, int n, DTYPE alpha)
{ {
if (!c.isInit || !XTensor::IsSameShaped(&a, &c)) { if (!c.isInit || !IsSameShaped(&a, &c)) {
InitTensor(&c, &a); InitTensor(&c, &a);
} }
......
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#include "../../XTensor.h" #include "../../XTensor.h"
#include "../../XName.h" #include "../../XName.h"
#include "../../XUtility.h" #include "../../XUtility.h"
#include "../shape/IsSameShaped.h"
#include "Mask.h" #include "Mask.h"
#include "Mask.cuh" #include "Mask.cuh"
...@@ -171,7 +172,7 @@ where i is the index of the element ...@@ -171,7 +172,7 @@ where i is the index of the element
*/ */
void Mask(const XTensor &a, const XTensor &mask, XTensor &c, DTYPE alpha) void Mask(const XTensor &a, const XTensor &mask, XTensor &c, DTYPE alpha)
{ {
if (!c.isInit || !XTensor::IsSameShaped(&a, &c)) { if (!c.isInit || !IsSameShaped(&a, &c)) {
InitTensor(&c, &a); InitTensor(&c, &a);
} }
......
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
#include "../../XTensor.h" #include "../../XTensor.h"
#include "../../XDevice.h" #include "../../XDevice.h"
#include "../../XName.h" #include "../../XName.h"
#include "../shape/IsSameShaped.h"
#include "MatrixMulBatched.h" #include "MatrixMulBatched.h"
#include "XTensorBLAS.h" #include "XTensorBLAS.h"
#include "MatrixMul2D.h" #include "MatrixMul2D.h"
...@@ -242,9 +243,9 @@ void _MatrixMulBatchedCPU(const TensorList * a, MATRIX_TRANS_TYPE transposedA, ...@@ -242,9 +243,9 @@ void _MatrixMulBatchedCPU(const TensorList * a, MATRIX_TRANS_TYPE transposedA,
XTensor * ai = (XTensor*)a->GetItem(i); XTensor * ai = (XTensor*)a->GetItem(i);
XTensor * bi = (XTensor*)b->GetItem(i); XTensor * bi = (XTensor*)b->GetItem(i);
XTensor * ci = (XTensor*)c->GetItem(i); XTensor * ci = (XTensor*)c->GetItem(i);
if (!XTensor::IsSameShaped(aim, ai) || if (!IsSameShaped(aim, ai) ||
!XTensor::IsSameShaped(bim, bi) || !IsSameShaped(bim, bi) ||
!XTensor::IsSameShaped(cim, ci)) !IsSameShaped(cim, ci))
{ {
isUniform = false; isUniform = false;
break; break;
......
...@@ -37,7 +37,7 @@ int GetSumIndex(const XTensor &a, const XTensor &b) ...@@ -37,7 +37,7 @@ int GetSumIndex(const XTensor &a, const XTensor &b)
{ {
if (a.order < b.order) if (a.order < b.order)
return -1; return -1;
if (XTensor::IsSameShaped(&a, &b)) if (IsSameShaped(&a, &b))
return -1; return -1;
int hitCount = 0; int hitCount = 0;
......
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
#include "../../XTensor.h" #include "../../XTensor.h"
#include "../../XName.h" #include "../../XName.h"
#include "../../XUtility.h" #include "../../XUtility.h"
#include "../shape/IsSameShaped.h"
#include "Multiply.h" #include "Multiply.h"
#include "Multiply.cuh" #include "Multiply.cuh"
#include "MultiplyDim.h" #include "MultiplyDim.h"
...@@ -169,7 +170,7 @@ int GetMultiplyDimIndex(const XTensor &a, const XTensor &b) ...@@ -169,7 +170,7 @@ int GetMultiplyDimIndex(const XTensor &a, const XTensor &b)
{ {
if(a.order < b.order) if(a.order < b.order)
return -1; return -1;
if(XTensor::IsSameShaped(&a, &b)) if(IsSameShaped(&a, &b))
return -1; return -1;
int hitCount = 0; int hitCount = 0;
...@@ -254,7 +255,7 @@ where i is the index of the item ...@@ -254,7 +255,7 @@ where i is the index of the item
*/ */
void Multiply(const XTensor &a, const XTensor &b, XTensor &c, DTYPE alpha, int leadingDim) void Multiply(const XTensor &a, const XTensor &b, XTensor &c, DTYPE alpha, int leadingDim)
{ {
if (!c.isInit || !XTensor::IsSameShaped(&a, &c)) { if (!c.isInit || !IsSameShaped(&a, &c)) {
InitTensor(&c, &a); InitTensor(&c, &a);
} }
......
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#include "MultiplyDim.h" #include "MultiplyDim.h"
#include "MultiplyDim.cuh" #include "MultiplyDim.cuh"
#include "../shape/Unsqueeze.h" #include "../shape/Unsqueeze.h"
#include "../shape/IsSameShaped.h"
#include "../../XName.h" #include "../../XName.h"
#include "../../XUtility.h" #include "../../XUtility.h"
#include "../movement/CopyValues.h" #include "../movement/CopyValues.h"
...@@ -57,7 +58,7 @@ void _MultiplyDim(const XTensor * a, const XTensor * b, XTensor * c, int n, DTYP ...@@ -57,7 +58,7 @@ void _MultiplyDim(const XTensor * a, const XTensor * b, XTensor * c, int n, DTYP
CheckDev(a->devID, b->devID); CheckDev(a->devID, b->devID);
if(XTensor::IsSameShaped(a, b)){ if(IsSameShaped(a, b)){
_Multiply(a, b, c, alpha); _Multiply(a, b, c, alpha);
return; return;
} }
...@@ -203,7 +204,7 @@ i.e., a is multiplied with b by broadcasting ...@@ -203,7 +204,7 @@ i.e., a is multiplied with b by broadcasting
*/ */
void MultiplyDim(const XTensor &a, const XTensor &b, XTensor &c, int n) void MultiplyDim(const XTensor &a, const XTensor &b, XTensor &c, int n)
{ {
if (!c.isInit || !XTensor::IsSameShaped(&a, &c)) { if (!c.isInit || !IsSameShaped(&a, &c)) {
InitTensor(&c, &a); InitTensor(&c, &a);
} }
...@@ -371,7 +372,7 @@ where some of dimensions of b can be of size 1 ...@@ -371,7 +372,7 @@ where some of dimensions of b can be of size 1
*/ */
void MultiplyBroadcast(const XTensor &a, const XTensor &b, XTensor &c) void MultiplyBroadcast(const XTensor &a, const XTensor &b, XTensor &c)
{ {
if (!c.isInit || !XTensor::IsSameShaped(&a, &c)) { if (!c.isInit || !IsSameShaped(&a, &c)) {
InitTensor(&c, &a); InitTensor(&c, &a);
} }
......
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
#include "../../XTensor.h" #include "../../XTensor.h"
#include "../../XName.h" #include "../../XName.h"
#include "../../XUtility.h" #include "../../XUtility.h"
#include "../shape/IsSameShaped.h"
#include "Sub.h" #include "Sub.h"
#include "Sub.cuh" #include "Sub.cuh"
#include "SubDim.h" #include "SubDim.h"
...@@ -149,7 +150,7 @@ int GetSubDimIndex(const XTensor &a, const XTensor &b) ...@@ -149,7 +150,7 @@ int GetSubDimIndex(const XTensor &a, const XTensor &b)
{ {
if(a.order < b.order) if(a.order < b.order)
return -1; return -1;
if(XTensor::IsSameShaped(&a, &b)) if(IsSameShaped(&a, &b))
return -1; return -1;
int hitCount = 0; int hitCount = 0;
...@@ -223,7 +224,7 @@ tensor subtraction c = a - b * \beta ...@@ -223,7 +224,7 @@ tensor subtraction c = a - b * \beta
*/ */
void Sub(const XTensor &a, const XTensor &b, XTensor &c, DTYPE beta) void Sub(const XTensor &a, const XTensor &b, XTensor &c, DTYPE beta)
{ {
if (!c.isInit || !XTensor::IsSameShaped(&a, &c)) { if (!c.isInit || !IsSameShaped(&a, &c)) {
InitTensor(&c, &a); InitTensor(&c, &a);
} }
......
...@@ -26,6 +26,7 @@ ...@@ -26,6 +26,7 @@
#include "../../XName.h" #include "../../XName.h"
#include "../../XUtility.h" #include "../../XUtility.h"
#include "../movement/CopyValues.h" #include "../movement/CopyValues.h"
#include "../shape/IsSameShaped.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -61,7 +62,7 @@ void _SubDim(const XTensor * a, const XTensor * b, XTensor * c, int n, DTYPE bet ...@@ -61,7 +62,7 @@ void _SubDim(const XTensor * a, const XTensor * b, XTensor * c, int n, DTYPE bet
return; return;
} }
if (XTensor::IsSameShaped(a, b)) { if (IsSameShaped(a, b)) {
_Sub(a, b, c, beta); _Sub(a, b, c, beta);
return; return;
} }
...@@ -188,7 +189,7 @@ i.e., a is subtracted with b by broadcasting ...@@ -188,7 +189,7 @@ i.e., a is subtracted with b by broadcasting
*/ */
void SubDim(const XTensor &a, const XTensor &b, XTensor &c, int n, DTYPE beta) void SubDim(const XTensor &a, const XTensor &b, XTensor &c, int n, DTYPE beta)
{ {
if (!c.isInit || !XTensor::IsSameShaped(&a, &c)) { if (!c.isInit || !IsSameShaped(&a, &c)) {
InitTensor(&c, &a); InitTensor(&c, &a);
} }
......
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#include "../../XUtility.h" #include "../../XUtility.h"
#include "../../XBLAS.h" #include "../../XBLAS.h"
#include "../movement/CopyValues.h" #include "../movement/CopyValues.h"
#include "../shape/IsSameShaped.h"
#include "Sum.h" #include "Sum.h"
#include "Sum.cuh" #include "Sum.cuh"
#include "SumDim.h" #include "SumDim.h"
...@@ -183,7 +184,7 @@ int GetSumDimIndex(const XTensor &a, const XTensor &b) ...@@ -183,7 +184,7 @@ int GetSumDimIndex(const XTensor &a, const XTensor &b)
{ {
if(a.order < b.order) if(a.order < b.order)
return -1; return -1;
if(XTensor::IsSameShaped(&a, &b)) if(IsSameShaped(&a, &b))
return -1; return -1;
int hitCount = 0; int hitCount = 0;
...@@ -256,7 +257,7 @@ tensor summation c = a + b * \beta ...@@ -256,7 +257,7 @@ tensor summation c = a + b * \beta
*/ */
void Sum(const XTensor &a, const XTensor &b, XTensor &c, DTYPE beta) void Sum(const XTensor &a, const XTensor &b, XTensor &c, DTYPE beta)
{ {
if (!c.isInit || !XTensor::IsSameShaped(&a, &c)) { if (!c.isInit || !IsSameShaped(&a, &c)) {
InitTensor(&c, &a); InitTensor(&c, &a);
} }
......
...@@ -26,6 +26,7 @@ ...@@ -26,6 +26,7 @@
#include "SumDim.h" #include "SumDim.h"
#include "SumDim.cuh" #include "SumDim.cuh"
#include "../shape/Unsqueeze.h" #include "../shape/Unsqueeze.h"
#include "../shape/IsSameShaped.h"
#include "../../XName.h" #include "../../XName.h"
#include "../../XUtility.h" #include "../../XUtility.h"
#include "../movement/CopyValues.h" #include "../movement/CopyValues.h"
...@@ -64,7 +65,7 @@ void _SumDim(const XTensor * a, const XTensor * b, XTensor * c, int n, DTYPE bet ...@@ -64,7 +65,7 @@ void _SumDim(const XTensor * a, const XTensor * b, XTensor * c, int n, DTYPE bet
return; return;
} }
if(XTensor::IsSameShaped(a, b)){ if(IsSameShaped(a, b)){
_Sum(a, b, c, beta); _Sum(a, b, c, beta);
return; return;
} }
...@@ -205,7 +206,7 @@ i.e., a is summed with b by broadcasting ...@@ -205,7 +206,7 @@ i.e., a is summed with b by broadcasting
*/ */
void SumDim(const XTensor &a, const XTensor &b, XTensor &c, int n, DTYPE beta) void SumDim(const XTensor &a, const XTensor &b, XTensor &c, int n, DTYPE beta)
{ {
if (!c.isInit || !XTensor::IsSameShaped(&a, &c)) { if (!c.isInit || !IsSameShaped(&a, &c)) {
InitTensor(&c, &a); InitTensor(&c, &a);
} }
...@@ -374,7 +375,7 @@ c = a + b * \beta ...@@ -374,7 +375,7 @@ c = a + b * \beta
*/ */
void SumBroadcast(const XTensor &a, const XTensor &b, XTensor &c, DTYPE beta) void SumBroadcast(const XTensor &a, const XTensor &b, XTensor &c, DTYPE beta)
{ {
if (!c.isInit || !XTensor::IsSameShaped(&a, &c)) { if (!c.isInit || !IsSameShaped(&a, &c)) {
InitTensor(&c, &a); InitTensor(&c, &a);
} }
......
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
#include "../../XUtility.h" #include "../../XUtility.h"
#include "../../XDevice.h" #include "../../XDevice.h"
#include "../../XTensor.h" #include "../../XTensor.h"
#include "../shape/IsSameShaped.h"
#include "XTensorBLAS.h" #include "XTensorBLAS.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -224,9 +225,9 @@ void _CudaBLASMatrixMULList(cublasHandle_t * handle, ...@@ -224,9 +225,9 @@ void _CudaBLASMatrixMULList(cublasHandle_t * handle,
XTensor * ai = (XTensor*)a->GetItem(i); XTensor * ai = (XTensor*)a->GetItem(i);
XTensor * bi = (XTensor*)b->GetItem(i); XTensor * bi = (XTensor*)b->GetItem(i);
XTensor * ci = (XTensor*)c->GetItem(i); XTensor * ci = (XTensor*)c->GetItem(i);
if (!XTensor::IsSameShaped(aim, ai) || if (!IsSameShaped(aim, ai) ||
!XTensor::IsSameShaped(bim, bi) || !IsSameShaped(bim, bi) ||
!XTensor::IsSameShaped(cim, ci)) !IsSameShaped(cim, ci))
{ {
isUniform = false; isUniform = false;
break; break;
......
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include <math.h> #include <math.h>
#include "../../XName.h" #include "../../XName.h"
#include "../shape/IsSameShaped.h"
#include "Binary.h" #include "Binary.h"
#include "Binary.cuh" #include "Binary.cuh"
...@@ -77,7 +78,7 @@ void _funcName(const XTensor * a, XTensor * b, T num) ...@@ -77,7 +78,7 @@ void _funcName(const XTensor * a, XTensor * b, T num)
_cudaFuncName(a, b, num); \ _cudaFuncName(a, b, num); \
return; \ return; \
} \ } \
CheckNTErrors((XTensor::IsSameShaped(a, b)), \ CheckNTErrors((IsSameShaped(a, b)), \
"Input tensors should have the same data type!"); \ "Input tensors should have the same data type!"); \
if (a->dataType == X_INT) { \ if (a->dataType == X_INT) { \
int * d = (int*)a->data; \ int * d = (int*)a->data; \
...@@ -112,7 +113,7 @@ void _funcName(const XTensor * a, XTensor * b, T num) ...@@ -112,7 +113,7 @@ void _funcName(const XTensor * a, XTensor * b, T num)
if (a->devID >= 0) { \ if (a->devID >= 0) { \
ShowNTErrors("No GPU devices support!") \ ShowNTErrors("No GPU devices support!") \
} \ } \
CheckNTErrors((XTensor::IsSameShaped(a, b)), \ CheckNTErrors((IsSameShaped(a, b)), \
"Input tensors should have the same data type!"); \ "Input tensors should have the same data type!"); \
if (a->dataType == X_INT) { \ if (a->dataType == X_INT) { \
int * d = (int*)a->data; \ int * d = (int*)a->data; \
...@@ -181,7 +182,7 @@ template XTensor funcName<double>(const XTensor&, double); ...@@ -181,7 +182,7 @@ template XTensor funcName<double>(const XTensor&, double);
template<class T> \ template<class T> \
void funcName(const XTensor &a, XTensor &b, T num) \ void funcName(const XTensor &a, XTensor &b, T num) \
{ \ { \
if (!b.isInit || !XTensor::IsSameShaped(&a, &b)) { \ if (!b.isInit || !IsSameShaped(&a, &b)) { \
InitTensor(&b, &a); \ InitTensor(&b, &a); \
} \ } \
_funcName(&a, &b, num); \ _funcName(&a, &b, num); \
......
...@@ -23,6 +23,7 @@ ...@@ -23,6 +23,7 @@
#include "../../XDevice.h" #include "../../XDevice.h"
#include "../../XUtility.h" #include "../../XUtility.h"
#include "../../XName.h" #include "../../XName.h"
#include "../shape/IsSameShaped.h"
#include "Binary.h" #include "Binary.h"
#include "Binary.cuh" #include "Binary.cuh"
...@@ -89,7 +90,7 @@ void Kernel##funcName(T1 * a, T1 * b, int size, T2 num) ...@@ -89,7 +90,7 @@ void Kernel##funcName(T1 * a, T1 * b, int size, T2 num)
template<class T> \ template<class T> \
void _Cuda##funcName(const XTensor * a, XTensor * b, T num) \ void _Cuda##funcName(const XTensor * a, XTensor * b, T num) \
{ \ { \
CheckNTErrors((XTensor::IsSameShaped(a, b)), \ CheckNTErrors((IsSameShaped(a, b)), \
"Input tensors should have the same type!"); \ "Input tensors should have the same type!"); \
CheckNTErrors((a->isSparse == false), "TODO!"); \ CheckNTErrors((a->isSparse == false), "TODO!"); \
\ \
......
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include "../../XTensor.h" #include "../../XTensor.h"
#include "../../XName.h" #include "../../XName.h"
#include "../shape/IsSameShaped.h"
#include "Clip.h" #include "Clip.h"
#include "Clip.cuh" #include "Clip.cuh"
...@@ -43,7 +44,7 @@ void _Clip(const XTensor * a, XTensor * b, DTYPE lower, DTYPE upper) ...@@ -43,7 +44,7 @@ void _Clip(const XTensor * a, XTensor * b, DTYPE lower, DTYPE upper)
} }
#endif #endif
CheckNTErrors((XTensor::IsSameShaped(a, b)), "Input tensors should have the same type!"); CheckNTErrors((IsSameShaped(a, b)), "Input tensors should have the same type!");
CheckNTErrors((a->dataType == DEFAULT_DTYPE), "TODO!"); CheckNTErrors((a->dataType == DEFAULT_DTYPE), "TODO!");
DTYPE * d = (DTYPE*)a->data; DTYPE * d = (DTYPE*)a->data;
...@@ -110,7 +111,7 @@ XTensor Clip(const XTensor & a, DTYPE lower, DTYPE upper) ...@@ -110,7 +111,7 @@ XTensor Clip(const XTensor & a, DTYPE lower, DTYPE upper)
void Clip(const XTensor & a, XTensor & b, DTYPE lower, DTYPE upper) void Clip(const XTensor & a, XTensor & b, DTYPE lower, DTYPE upper)
{ {
if (!b.isInit || !XTensor::IsSameShaped(&a, &b)) { if (!b.isInit || !IsSameShaped(&a, &b)) {
InitTensor(&b, &a); InitTensor(&b, &a);
} }
......
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include "../../XDevice.h" #include "../../XDevice.h"
#include "../../XTensor.h" #include "../../XTensor.h"
#include "../shape/IsSameShaped.h"
#include "Clip.h" #include "Clip.h"
#include "Clip.cuh" #include "Clip.cuh"
...@@ -74,7 +75,7 @@ set each entry to its clip value ...@@ -74,7 +75,7 @@ set each entry to its clip value
*/ */
void _CudaClip(const XTensor * a, XTensor * b, DTYPE lower, DTYPE upper) void _CudaClip(const XTensor * a, XTensor * b, DTYPE lower, DTYPE upper)
{ {
CheckNTErrors((XTensor::IsSameShaped(a, b)), "Input tensors should have the same type!"); CheckNTErrors((IsSameShaped(a, b)), "Input tensors should have the same type!");
CheckNTErrors((a->isSparse == false), "TODO!"); CheckNTErrors((a->isSparse == false), "TODO!");
int gridSize[3]; int gridSize[3];
......
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include "../../XTensor.h" #include "../../XTensor.h"
#include "../../XName.h" #include "../../XName.h"
#include "../shape/IsSameShaped.h"
#include "Compare.h" #include "Compare.h"
#include "Compare.cuh" #include "Compare.cuh"
...@@ -41,7 +42,7 @@ DTYPE myIsNotEqual(DTYPE a, DTYPE b) ...@@ -41,7 +42,7 @@ DTYPE myIsNotEqual(DTYPE a, DTYPE b)
#define _SIMPLE_COMPARE_FUNCTION(_funcName, _cudaFuncName, origFunc) \ #define _SIMPLE_COMPARE_FUNCTION(_funcName, _cudaFuncName, origFunc) \
void _funcName(const XTensor * a, XTensor * b, DTYPE number) \ void _funcName(const XTensor * a, XTensor * b, DTYPE number) \
{ \ { \
CheckNTErrors((XTensor::IsSameShaped(a, b)), \ CheckNTErrors((IsSameShaped(a, b)), \
"Input tensors should have the same type!"); \ "Input tensors should have the same type!"); \
CheckNTErrors((a->dataType == DEFAULT_DTYPE), "TODO!"); \ CheckNTErrors((a->dataType == DEFAULT_DTYPE), "TODO!"); \
/* run it on GPUs */ \ /* run it on GPUs */ \
...@@ -58,7 +59,7 @@ void _funcName(const XTensor * a, XTensor * b, DTYPE number) ...@@ -58,7 +59,7 @@ void _funcName(const XTensor * a, XTensor * b, DTYPE number)
#define _SIMPLE_COMPARE_FUNCTION(_funcName, origFunc) \ #define _SIMPLE_COMPARE_FUNCTION(_funcName, origFunc) \
void _funcName(const XTensor * a, XTensor * b, DTYPE number) \ void _funcName(const XTensor * a, XTensor * b, DTYPE number) \
{ \ { \
CheckNTErrors((XTensor::IsSameShaped(a, b)), \ CheckNTErrors((IsSameShaped(a, b)), \
"Input tensors should have the same type!"); \ "Input tensors should have the same type!"); \
CheckNTErrors((a->dataType == DEFAULT_DTYPE), "TODO!"); \ CheckNTErrors((a->dataType == DEFAULT_DTYPE), "TODO!"); \
/* run it on GPUs */ \ /* run it on GPUs */ \
...@@ -96,7 +97,7 @@ XTensor funcName(const XTensor &a, DTYPE number) ...@@ -96,7 +97,7 @@ XTensor funcName(const XTensor &a, DTYPE number)
#define SIMPLE_COMPARE_FUNCTION_VOID(funcName, _funcName, operationId) \ #define SIMPLE_COMPARE_FUNCTION_VOID(funcName, _funcName, operationId) \
void funcName(const XTensor &a, XTensor &b, DTYPE number) \ void funcName(const XTensor &a, XTensor &b, DTYPE number) \
{ \ { \
if (!b.isInit || !XTensor::IsSameShaped(&a, &b)) { \ if (!b.isInit || !IsSameShaped(&a, &b)) { \
InitTensor(&b, &a); \ InitTensor(&b, &a); \
} \ } \
_funcName(&a, &b, number); \ _funcName(&a, &b, number); \
......
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
#include <math.h> #include <math.h>
#include "../../XTensor.h" #include "../../XTensor.h"
#include "../../XName.h" #include "../../XName.h"
#include "../shape/IsSameShaped.h"
#include "Normalize.h" #include "Normalize.h"
#include "Normalize.cuh" #include "Normalize.cuh"
...@@ -47,9 +48,9 @@ void _Normalize(const XTensor * input, XTensor * output, int dim, ...@@ -47,9 +48,9 @@ void _Normalize(const XTensor * input, XTensor * output, int dim,
const XTensor * a, const XTensor * b, DTYPE epsilon) const XTensor * a, const XTensor * b, DTYPE epsilon)
{ {
int dimRDI = input->order - dim - 1; int dimRDI = input->order - dim - 1;
CheckNTErrors((XTensor::IsSameShaped(input, output)), "Unmatched input tensors!"); CheckNTErrors((IsSameShaped(input, output)), "Unmatched input tensors!");
CheckNTErrors((XTensor::IsSameShaped(a, b)), "Unmatched input tensors"); CheckNTErrors((IsSameShaped(a, b)), "Unmatched input tensors");
CheckNTErrors((XTensor::IsSameShaped(mean, var)), "Unmatched input tensors"); CheckNTErrors((IsSameShaped(mean, var)), "Unmatched input tensors");
CheckNTErrors((input && output && mean && var && a && b), "Empty input tensors!"); CheckNTErrors((input && output && mean && var && a && b), "Empty input tensors!");
CheckNTErrors((dimRDI >= 0 && dimRDI < input->order), "Incorrect reduction dimension!"); CheckNTErrors((dimRDI >= 0 && dimRDI < input->order), "Incorrect reduction dimension!");
CheckNTErrors((input->order == mean->order + 1), "Incorrect reduction dimension!"); CheckNTErrors((input->order == mean->order + 1), "Incorrect reduction dimension!");
...@@ -203,7 +204,7 @@ void Normalize(const XTensor &input, XTensor &output, int dim, ...@@ -203,7 +204,7 @@ void Normalize(const XTensor &input, XTensor &output, int dim,
const XTensor &mean, const XTensor &var, const XTensor &mean, const XTensor &var,
const XTensor &a, const XTensor &b, DTYPE epsilon) const XTensor &a, const XTensor &b, DTYPE epsilon)
{ {
if (!output.isInit || !XTensor::IsSameShaped(&input, &output)) { if (!output.isInit || !IsSameShaped(&input, &output)) {
InitTensor(&output, &input); InitTensor(&output, &input);
} }
......
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
#include "../../XTensor.h" #include "../../XTensor.h"
#include "../../XName.h" #include "../../XName.h"
#include "../../XUtility.h" #include "../../XUtility.h"
#include "../shape/IsSameShaped.h"
#include "ScaleAndShift.h" #include "ScaleAndShift.h"
#include "ScaleAndShift.cuh" #include "ScaleAndShift.cuh"
...@@ -147,7 +148,7 @@ b = a * scale + shift ...@@ -147,7 +148,7 @@ b = a * scale + shift
*/ */
void ScaleAndShift(const XTensor & a, XTensor & b, DTYPE scale, DTYPE shift) void ScaleAndShift(const XTensor & a, XTensor & b, DTYPE scale, DTYPE shift)
{ {
if (!b.isInit || !XTensor::IsSameShaped(&a, &b)) { if (!b.isInit || !IsSameShaped(&a, &b)) {
InitTensor(&b, &a); InitTensor(&b, &a);
} }
......
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
#include <math.h> #include <math.h>
#include "../../XName.h" #include "../../XName.h"
#include "../shape/IsSameShaped.h"
#include "Unary.h" #include "Unary.h"
#include "Unary.cuh" #include "Unary.cuh"
...@@ -77,7 +78,7 @@ void _funcName(const XTensor * a, XTensor * b) ...@@ -77,7 +78,7 @@ void _funcName(const XTensor * a, XTensor * b)
_cudaFuncName(a, b); \ _cudaFuncName(a, b); \
return; \ return; \
} \ } \
CheckNTErrors((XTensor::IsSameShaped(a, b)), \ CheckNTErrors((IsSameShaped(a, b)), \
"Input tensors should have the same type!"); \ "Input tensors should have the same type!"); \
if (a->dataType == X_INT) { \ if (a->dataType == X_INT) { \
int * d = (int*)a->data; \ int * d = (int*)a->data; \
...@@ -108,7 +109,7 @@ void _funcName(const XTensor * a, XTensor * b) ...@@ -108,7 +109,7 @@ void _funcName(const XTensor * a, XTensor * b)
if (a->devID >= 0) { \ if (a->devID >= 0) { \
ShowNTErrors("No GPU devices support!") \ ShowNTErrors("No GPU devices support!") \
} \ } \
CheckNTErrors((XTensor::IsSameShaped(a, b)), \ CheckNTErrors((IsSameShaped(a, b)), \
"Input tensors should have the same type!"); \ "Input tensors should have the same type!"); \
if (a->dataType == X_INT) { \ if (a->dataType == X_INT) { \
int * d = (int*)a->data; \ int * d = (int*)a->data; \
...@@ -160,7 +161,7 @@ XTensor funcName(const XTensor & a) ...@@ -160,7 +161,7 @@ XTensor funcName(const XTensor & a)
#define SIMPLE_UNARY_FUNCTION_VOID(funcName, _funcName, operationId) \ #define SIMPLE_UNARY_FUNCTION_VOID(funcName, _funcName, operationId) \
void funcName(const XTensor & a, XTensor & b) \ void funcName(const XTensor & a, XTensor & b) \
{ \ { \
if (!b.isInit || !XTensor::IsSameShaped(&a, &b)) { \ if (!b.isInit || !IsSameShaped(&a, &b)) { \
InitTensor(&b, &a); \ InitTensor(&b, &a); \
} \ } \
_funcName(&a, &b); \ _funcName(&a, &b); \
......
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
#include <math.h> #include <math.h>
#include "../../XDevice.h" #include "../../XDevice.h"
#include "../../XName.h" #include "../../XName.h"
#include "../shape/IsSameShaped.h"
#include "Unary.h" #include "Unary.h"
#include "Unary.cuh" #include "Unary.cuh"
#include<cuda_runtime.h> #include<cuda_runtime.h>
...@@ -154,7 +155,7 @@ void Kernel##funcName(T * a, T * b, int size) \ ...@@ -154,7 +155,7 @@ void Kernel##funcName(T * a, T * b, int size) \
} \ } \
void _Cuda##funcName(const XTensor * a, XTensor * b) \ void _Cuda##funcName(const XTensor * a, XTensor * b) \
{ \ { \
CheckNTErrors((XTensor::IsSameShaped(a, b)), \ CheckNTErrors((IsSameShaped(a, b)), \
"Input tensors should have the same type!"); \ "Input tensors should have the same type!"); \
CheckNTErrors(a->isSparse == false, "TODO!"); \ CheckNTErrors(a->isSparse == false, "TODO!"); \
\ \
......
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
*/ */
#include "../../XTensor.h" #include "../../XTensor.h"
#include "../shape/IsSameShaped.h"
#include "CopyInGrid.h" #include "CopyInGrid.h"
#include "CopyBlocksInGrid.h" #include "CopyBlocksInGrid.h"
...@@ -38,7 +39,7 @@ in the k-th grid ...@@ -38,7 +39,7 @@ in the k-th grid
*/ */
void _CopyInGrid(const XTensor * s, XTensor * t, int * index, int blockDim, int blockNumInGrid, bool isIndexOnDev) void _CopyInGrid(const XTensor * s, XTensor * t, int * index, int blockDim, int blockNumInGrid, bool isIndexOnDev)
{ {
CheckNTErrors((XTensor::IsSameShaped(s, t)), "Unmatched tensors!"); CheckNTErrors((IsSameShaped(s, t)), "Unmatched tensors!");
int blockDimRDI = s->order - blockDim - 1; int blockDimRDI = s->order - blockDim - 1;
int blockSize = 1; int blockSize = 1;
......
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
#include <math.h> #include <math.h>
#include "ReduceSum.h" #include "ReduceSum.h"
#include "ReduceSum.cuh" #include "ReduceSum.cuh"
#include "../shape/IsSameShaped.h"
#include "../../XName.h" #include "../../XName.h"
#include "../../XBLAS.h" #include "../../XBLAS.h"
#include "VectorBuffer.h" #include "VectorBuffer.h"
...@@ -51,7 +52,7 @@ void _ReduceSum(const XTensor * input, XTensor * output, int dim, const XTensor ...@@ -51,7 +52,7 @@ void _ReduceSum(const XTensor * input, XTensor * output, int dim, const XTensor
CheckNTErrors((input->order == output->order + 1), "Incorrect tensor sizes!"); CheckNTErrors((input->order == output->order + 1), "Incorrect tensor sizes!");
CheckNTErrors((input->order > dim && dim >=0), "Illegal dimension to reduce!"); CheckNTErrors((input->order > dim && dim >=0), "Illegal dimension to reduce!");
CheckNTErrors((input->dataType == output->dataType), "Unmatched data types!"); CheckNTErrors((input->dataType == output->dataType), "Unmatched data types!");
CheckNTErrors((shift == NULL || XTensor::IsSameShaped(output, shift)), "Incorrect shift tensor size!"); CheckNTErrors((shift == NULL || IsSameShaped(output, shift)), "Incorrect shift tensor size!");
int dimRDI = input->order - dim - 1; int dimRDI = input->order - dim - 1;
CheckNTErrors(dimRDI >= 0, "Wrong dimension!"); CheckNTErrors(dimRDI >= 0, "Wrong dimension!");
......
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include "../../XTensor.h" #include "../../XTensor.h"
#include "../../XName.h" #include "../../XName.h"
#include "../shape/IsSameShaped.h"
#include "Concatenate.h" #include "Concatenate.h"
#include "Merge.h" #include "Merge.h"
#include "ConcatenateSolely.h" #include "ConcatenateSolely.h"
...@@ -44,7 +45,7 @@ void _Concatenate(const TensorList * smalls, XTensor * big, int dim) ...@@ -44,7 +45,7 @@ void _Concatenate(const TensorList * smalls, XTensor * big, int dim)
XTensor * a = (XTensor*)smalls->GetItem(i - 1); XTensor * a = (XTensor*)smalls->GetItem(i - 1);
XTensor * b = (XTensor*)smalls->GetItem(i); XTensor * b = (XTensor*)smalls->GetItem(i);
CheckNTErrors((a && b), "Empty input tensors!"); CheckNTErrors((a && b), "Empty input tensors!");
if (!XTensor::IsSameShaped(a, b)) if (!IsSameShaped(a, b))
uniform = false; uniform = false;
} }
...@@ -76,7 +77,7 @@ XTensor Concatenate(const TensorList &smalls, int dim) ...@@ -76,7 +77,7 @@ XTensor Concatenate(const TensorList &smalls, int dim)
XTensor * a = (XTensor*)smalls.GetItem(i - 1); XTensor * a = (XTensor*)smalls.GetItem(i - 1);
XTensor * b = (XTensor*)smalls.GetItem(i); XTensor * b = (XTensor*)smalls.GetItem(i);
CheckNTErrors((a && b), "Empty input tensors!"); CheckNTErrors((a && b), "Empty input tensors!");
if (!XTensor::IsSameShaped(a, b)) if (!IsSameShaped(a, b))
uniform = false; uniform = false;
} }
XTensor * tensor = (XTensor*)smalls.GetItem(0); XTensor * tensor = (XTensor*)smalls.GetItem(0);
...@@ -189,7 +190,7 @@ void Concatenate(const TensorList & smalls, XTensor & big, int dim) ...@@ -189,7 +190,7 @@ void Concatenate(const TensorList & smalls, XTensor & big, int dim)
XTensor * a = (XTensor*)smalls.GetItem(i - 1); XTensor * a = (XTensor*)smalls.GetItem(i - 1);
XTensor * b = (XTensor*)smalls.GetItem(i); XTensor * b = (XTensor*)smalls.GetItem(i);
CheckNTErrors((a && b), "Empty input tensors!"); CheckNTErrors((a && b), "Empty input tensors!");
if (!XTensor::IsSameShaped(a, b)) if (!IsSameShaped(a, b))
uniform = false; uniform = false;
} }
...@@ -290,7 +291,7 @@ XTensor Concatenate(const XTensor &smallA, const XTensor &smallB, int dim) ...@@ -290,7 +291,7 @@ XTensor Concatenate(const XTensor &smallA, const XTensor &smallB, int dim)
XTensor * a = (XTensor*)smalls.Get(i - 1); XTensor * a = (XTensor*)smalls.Get(i - 1);
XTensor * b = (XTensor*)smalls.Get(i); XTensor * b = (XTensor*)smalls.Get(i);
CheckNTErrors((a && b), "Empty input tensors!"); CheckNTErrors((a && b), "Empty input tensors!");
if (!XTensor::IsSameShaped(a, b)) if (!IsSameShaped(a, b))
uniform = false; uniform = false;
} }
XTensor * tensor = (XTensor*)smalls.Get(0); XTensor * tensor = (XTensor*)smalls.Get(0);
......
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northestern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: LI Yinqiao (email: li.yin.qiao.2012@hotmail.com) 2019-10-22
*/
#include "../../XTensor.h"
#include "IsSameShaped.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/*
check whether the two matrices are in the same type and size
>> a - input tensor
>> b - anther tensor to compare with
<< return - whether the two input tensors are identical
*/
bool IsSameShaped(const XTensor * a, const XTensor * b)
{
    /* a missing tensor can never match anything */
    if (a == NULL || b == NULL)
        return false;

    /* the number of dimensions must agree first */
    if (a->order != b->order)
        return false;

    /* every dimension length must agree (RDI = reversed dimension index) */
    for (int i = 0; i < a->order; i++) {
        if (a->dimSizeRDI[i] != b->dimSizeRDI[i])
            return false;
    }

    /* finally the data type, density ratio and sparsity flag must agree */
    return a->dataType == b->dataType &&
           a->denseRatio == b->denseRatio &&
           a->isSparse == b->isSparse;
}
/*
check whether the three matrices are in the same type and size
>> a - input tensor
>> b - anther tensor to compare with
>> c - a tensor again
<< return - whether the two input tensors are identical
*/
bool IsSameShaped(const XTensor * a, const XTensor * b, const XTensor * c)
{
    /* comparing a against each of b and c is sufficient: shape equality is transitive */
    if (!IsSameShaped(a, b))
        return false;
    return IsSameShaped(a, c);
}
} // namespace nts(NiuTrans.Tensor)
\ No newline at end of file
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northestern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: LI Yinqiao (email: li.yin.qiao.2012@hotmail.com) 2019-10-22
*/
#ifndef __ISSAMESHAPED_H__
#define __ISSAMESHAPED_H__
#include "../../XTensor.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* judge whether the two matrices are in the same type and size
   (order, per-dimension sizes, data type, density ratio and sparsity flag) */
bool IsSameShaped(const XTensor * a, const XTensor * b);
/* judge whether the three matrices are in the same type and size */
bool IsSameShaped(const XTensor * a, const XTensor * b, const XTensor * c);
} // namespace nts(NiuTrans.Tensor)
#endif // __ISSAMESHAPED_H__
\ No newline at end of file
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
#include "../../XTensor.h" #include "../../XTensor.h"
#include "../../XUtility.h" #include "../../XUtility.h"
#include "../../XName.h" #include "../../XName.h"
#include "../shape/IsSameShaped.h"
#include "Merge.h" #include "Merge.h"
#include "MakeMergeBlockIndex.h" #include "MakeMergeBlockIndex.h"
#include "../movement/CopyBlocksOnSite.h" #include "../movement/CopyBlocksOnSite.h"
...@@ -433,7 +434,7 @@ merge two tensors into a big tensor (return an XTensor structure) ...@@ -433,7 +434,7 @@ merge two tensors into a big tensor (return an XTensor structure)
*/ */
XTensor Merge(const XTensor &smallA, const XTensor &smallB, int whereToMerge) XTensor Merge(const XTensor &smallA, const XTensor &smallB, int whereToMerge)
{ {
CheckNTErrors(XTensor::IsSameShaped(&smallA, &smallB), CheckNTErrors(IsSameShaped(&smallA, &smallB),
"The two tensors must be of the same size!"); "The two tensors must be of the same size!");
int order = smallA.order; int order = smallA.order;
......
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
#include "../../XTensor.h" #include "../../XTensor.h"
#include "../../XName.h" #include "../../XName.h"
#include "../movement/CopyValues.h" #include "../movement/CopyValues.h"
#include "../shape/IsSameShaped.h"
#include "Reshape.h" #include "Reshape.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -52,7 +53,7 @@ XTensor Reshape(XTensor &s, int order, int * dimSize) ...@@ -52,7 +53,7 @@ XTensor Reshape(XTensor &s, int order, int * dimSize)
void Reshape(XTensor &s, XTensor &t, int order, int * dimSize) void Reshape(XTensor &s, XTensor &t, int order, int * dimSize)
{ {
if (!t.isInit || !XTensor::IsSameShaped(&t, &s)) { if (!t.isInit || !IsSameShaped(&t, &s)) {
InitTensor(&t, &s); InitTensor(&t, &s);
} }
......
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include "Squeeze.h" #include "Squeeze.h"
#include "../movement/CopyValues.h" #include "../movement/CopyValues.h"
#include "../shape/IsSameShaped.h"
#include "../../XName.h" #include "../../XName.h"
namespace nts{ // namespace nts(NiuTrans.Tensor) namespace nts{ // namespace nts(NiuTrans.Tensor)
...@@ -37,7 +38,7 @@ void _Squeeze(XTensor * source, XTensor * target, int leadingDim) ...@@ -37,7 +38,7 @@ void _Squeeze(XTensor * source, XTensor * target, int leadingDim)
{ {
int order = target->order; int order = target->order;
CheckNTErrors(XTensor::IsSameShaped(source, target), CheckNTErrors(IsSameShaped(source, target),
"The source and target tensor must be of the same size!"); "The source and target tensor must be of the same size!");
CheckNTErrors(leadingDim >= -1 && leadingDim < order, CheckNTErrors(leadingDim >= -1 && leadingDim < order,
"Wrong leading dimension"); "Wrong leading dimension");
...@@ -130,7 +131,7 @@ XTensor Squeeze(XTensor & source, int leadingDim) ...@@ -130,7 +131,7 @@ XTensor Squeeze(XTensor & source, int leadingDim)
void Squeeze(XTensor & source, XTensor & target, int leadingDim) void Squeeze(XTensor & source, XTensor & target, int leadingDim)
{ {
if (!target.isInit || !XTensor::IsSameShaped(&source, &target)) { if (!target.isInit || !IsSameShaped(&source, &target)) {
InitTensor(&target, &source); InitTensor(&target, &source);
} }
......
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
#include <math.h> #include <math.h>
#include "../../XTensor.h" #include "../../XTensor.h"
#include "../movement/CopyValues.h" #include "../movement/CopyValues.h"
#include "../shape/IsSameShaped.h"
#include "../../XUtility.h" #include "../../XUtility.h"
#include "../../XName.h" #include "../../XName.h"
#include "Sort.h" #include "Sort.h"
...@@ -40,7 +41,7 @@ void _Sort(const XTensor * a, XTensor * b, XTensor * index, int dim) ...@@ -40,7 +41,7 @@ void _Sort(const XTensor * a, XTensor * b, XTensor * index, int dim)
{ {
dim = MODX(dim, a->order); dim = MODX(dim, a->order);
CheckNTErrors((XTensor::IsSameShaped(a, b)), "Input tensors should have the same type!"); CheckNTErrors((IsSameShaped(a, b)), "Input tensors should have the same type!");
CheckNTErrors((dim >= 0 && dim < a->order), "Incorrect dimension specified!"); CheckNTErrors((dim >= 0 && dim < a->order), "Incorrect dimension specified!");
CheckNTErrors((a->order == index->order), "Unmatched input tensors!"); CheckNTErrors((a->order == index->order), "Unmatched input tensors!");
CheckNTErrors((index->dataType == X_INT), "Wrong data type!"); CheckNTErrors((index->dataType == X_INT), "Wrong data type!");
......
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northestern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: LI Yinqiao (email: li.yin.qiao.2012@hotmail.com) 2019-10-22
*/
#include "../../XTensor.h"
#include "../../XUtility.h"
#include "CheckData.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* compare two numbers */
bool IsFloatEqual(DTYPE a, DTYPE b, float absError, float relError)
{
    /* identical values (including two zeros) match trivially */
    if (a == b)
        return true;

    /* a small absolute difference is accepted regardless of magnitude */
    if (fabs(a - b) < absError)
        return true;

    /* otherwise compare relative to the operand of larger magnitude,
       which is guaranteed non-zero here (both-zero was handled above) */
    DTYPE base = (fabs(a) < fabs(b)) ? b : a;
    return fabs((a - b) / base) < relError;
}
/* check whether the data array is the same as the answer
>> d - input data (it must be on CPUs)
>> num - number of data items
>> beg - where we start this in the data array of the tensor
*/
/*
check whether the data array of a tensor is exactly the same as the answer
>> tensor - the tensor to check
>> d - the expected data (it must be on CPUs)
>> num - number of data items to compare (must equal unitNum - beg)
>> beg - index of the first item of the tensor to compare
<< return - whether the "num" items starting at "beg" equal the answer
*/
bool CheckData(const XTensor * tensor, const void * d, int num, int beg)
{
    if (tensor->data == NULL || d == NULL)
        return false;

    CheckNTErrors(!tensor->isSparse, "TODO");
    CheckNTErrors(num == tensor->unitNum - beg, "Illegal size!");

    /* start the comparison at the "beg"-th item instead of the head of the
       data array; the previous code ignored "beg" and always compared from
       offset 0, which is wrong whenever beg > 0 */
    const char * src = (const char*)tensor->data + (size_t)beg * tensor->unitSize;
    size_t size = (size_t)num * tensor->unitSize;

    if (tensor->devID < 0) {
        return !memcmp(src, d, size);
    }
#ifdef USE_CUDA
    else {
        /* copy the device data back to the host before comparing */
        char * copy = new char[size];
        XMemCopy(copy, -1, (void*)src, tensor->devID, size);
        bool result = (memcmp(copy, d, size) == 0);
        delete[] copy;
        return result;
    }
#endif
    /* NOTE(review): without USE_CUDA a device-resident tensor falls through
       to "true" (vacuously passes) — kept from the original; confirm intent */
    return true;
}
/* check whether the data array is the same as the "answer" */
/*
check whether the data array of a tensor matches the answer within a tolerance
>> tensor - the tensor to check
>> d - the expected data (it must be on CPUs)
>> num - number of data items to compare (must equal unitNum - beg)
>> tolerance - maximum allowed absolute error per item
>> beg - index of the first item of the tensor to compare
<< return - whether the "num" items starting at "beg" match the answer
*/
bool CheckData(const XTensor * tensor, const void * d, int num, float tolerance, int beg)
{
    if (tensor->data == NULL || d == NULL)
        return false;

    CheckNTErrors(!tensor->isSparse, "TODO");
    CheckNTErrors(num == tensor->unitNum - beg, "Illegal size!");

    /* start reading at the "beg"-th item and compare exactly "num" items;
       the previous code started at item 0 but only looped (num - beg) times,
       which compared the wrong items whenever beg > 0 */
    DTYPE * valuePrt = (DTYPE*)tensor->data + beg;
    DTYPE * answerPrt = (DTYPE*)d;
    for (int i = 0; i < num; i++) {
        /* fetch one item (copies from the device when devID >= 0; this is
           one transfer per item, so it can be slow for large GPU tensors) */
        DTYPE value = ToCPU(tensor->devID, valuePrt);
        if (IsFloatEqual(value, *answerPrt, tolerance, 1e-4F) == false)
            return false;
        valuePrt++;
        answerPrt++;
    }
    return true;
}
} // namespace nts(NiuTrans.Tensor)
\ No newline at end of file
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northestern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: LI Yinqiao (email: li.yin.qiao.2012@hotmail.com) 2019-10-22
*/
/* fix: the include guard was copy-pasted from IsSameShaped.h
   (__ISSAMESHAPED_H__); including both headers in one translation unit
   would silently drop the CheckData declarations */
#ifndef __CHECKDATA_H__
#define __CHECKDATA_H__
#include "../../XTensor.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* check whether the data array is exactly the same as the answer */
bool CheckData(const XTensor * tensor, const void * answer, int num, int beg = 0);
/* check whether the data array matches the answer within a tolerance */
bool CheckData(const XTensor * tensor, const void * answer, int num, float tolerance, int beg = 0);
} // namespace nts(NiuTrans.Tensor)
#endif // __CHECKDATA_H__
\ No newline at end of file
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include <stdlib.h> #include <stdlib.h>
#include "../XName.h" #include "../XName.h"
#include "../../tensor/core/shape/IsSameShaped.h"
#include "HardTanH.h" #include "HardTanH.h"
#include "HardTanH.cuh" #include "HardTanH.cuh"
...@@ -36,7 +37,7 @@ y = 1 if x > 1 ...@@ -36,7 +37,7 @@ y = 1 if x > 1
*/ */
void _HardTanH(const XTensor * x, XTensor * y) void _HardTanH(const XTensor * x, XTensor * y)
{ {
CheckNTErrors(XTensor::IsSameShaped(x, y), CheckNTErrors(IsSameShaped(x, y),
"The input tensor and output tensor must have the same shape!") "The input tensor and output tensor must have the same shape!")
#ifdef USE_CUDA #ifdef USE_CUDA
...@@ -87,7 +88,7 @@ XTensor HardTanH(const XTensor &x) ...@@ -87,7 +88,7 @@ XTensor HardTanH(const XTensor &x)
void HardTanH(const XTensor &x, XTensor &y) void HardTanH(const XTensor &x, XTensor &y)
{ {
if (!y.isInit || !XTensor::IsSameShaped(&y, &x)) { if (!y.isInit || !IsSameShaped(&y, &x)) {
InitTensor(&y, &x); InitTensor(&y, &x);
} }
......
...@@ -23,6 +23,7 @@ ...@@ -23,6 +23,7 @@
#include "../XName.h" #include "../XName.h"
#include "../XUtility.h" #include "../XUtility.h"
#include "../core/movement/CopyValues.h" #include "../core/movement/CopyValues.h"
#include "../core/shape/IsSameShaped.h"
namespace nts{ // namespace nts(NiuTrans.Tensor) namespace nts{ // namespace nts(NiuTrans.Tensor)
...@@ -33,7 +34,7 @@ identity function y = x ...@@ -33,7 +34,7 @@ identity function y = x
*/ */
void _Identity(const XTensor * x, XTensor * y) void _Identity(const XTensor * x, XTensor * y)
{ {
CheckNTErrors(XTensor::IsSameShaped(x, y), CheckNTErrors(IsSameShaped(x, y),
"The input tensor and output tensor must have the same shape!") "The input tensor and output tensor must have the same shape!")
_CopyValues(x, y); _CopyValues(x, y);
} }
...@@ -63,7 +64,7 @@ XTensor Identity(const XTensor &x) ...@@ -63,7 +64,7 @@ XTensor Identity(const XTensor &x)
void Identity(const XTensor &x, XTensor &y) void Identity(const XTensor &x, XTensor &y)
{ {
if (!y.isInit || !y.IsSameShaped(&y, &x)) { if (!y.isInit || !IsSameShaped(&y, &x)) {
InitTensor(&y, &x); InitTensor(&y, &x);
} }
......
...@@ -27,6 +27,7 @@ ...@@ -27,6 +27,7 @@
#include "../core/reduce/ReduceSum.h" #include "../core/reduce/ReduceSum.h"
#include "../core/reduce/ReduceMax.h" #include "../core/reduce/ReduceMax.h"
#include "../core/movement/CopyValues.h" #include "../core/movement/CopyValues.h"
#include "../core/shape/IsSameShaped.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -210,7 +211,7 @@ void LogSoftmax(const XTensor &x, XTensor &y, int leadDim) ...@@ -210,7 +211,7 @@ void LogSoftmax(const XTensor &x, XTensor &y, int leadDim)
if (ld < 0) if (ld < 0)
ld = x.order - 1; ld = x.order - 1;
if (!y.isInit || !XTensor::IsSameShaped(&y, &x)) { if (!y.isInit || !IsSameShaped(&y, &x)) {
InitTensor(&y, &x); InitTensor(&y, &x);
} }
...@@ -352,7 +353,7 @@ void _LogSoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x, ...@@ -352,7 +353,7 @@ void _LogSoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
} }
} }
else { else {
CheckNTErrors((XTensor::IsSameShaped(gold, y)), "The tensors must be of the same size!"); CheckNTErrors((IsSameShaped(gold, y)), "The tensors must be of the same size!");
for (int k = 0; k < blockNum; k++) { for (int k = 0; k < blockNum; k++) {
gp = (DTYPE*)gold->data + k * blockSize; gp = (DTYPE*)gold->data + k * blockSize;
op = (DTYPE*)y->data + k * blockSize; op = (DTYPE*)y->data + k * blockSize;
...@@ -406,7 +407,7 @@ void _LogSoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x, ...@@ -406,7 +407,7 @@ void _LogSoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
} }
} }
else { else {
CheckNTErrors((XTensor::IsSameShaped(gold, y)), "The tensors must be of the same size!"); CheckNTErrors((IsSameShaped(gold, y)), "The tensors must be of the same size!");
for (int k = 0; k < blockNum; k++) { for (int k = 0; k < blockNum; k++) {
gp = (DTYPE*)gold->data + k * blockSize; gp = (DTYPE*)gold->data + k * blockSize;
op = (DTYPE*)y->data + k * blockSize; op = (DTYPE*)y->data + k * blockSize;
......
...@@ -25,6 +25,7 @@ ...@@ -25,6 +25,7 @@
#include "../core/arithmetic/MultiplyDim.h" #include "../core/arithmetic/MultiplyDim.h"
#include "../core/reduce/ReduceSum.cuh" #include "../core/reduce/ReduceSum.cuh"
#include "../core/reduce/ReduceMax.cuh" #include "../core/reduce/ReduceMax.cuh"
#include "../core/shape/IsSameShaped.h"
#include "../XDevice.h" #include "../XDevice.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -430,7 +431,7 @@ void _CudaLogSoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x, ...@@ -430,7 +431,7 @@ void _CudaLogSoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
dedx->dimSize[0], dedx->dimSize[1], gold->unitNumNonZero, lossName); dedx->dimSize[0], dedx->dimSize[1], gold->unitNumNonZero, lossName);
} }
else { else {
CheckNTErrors((XTensor::IsSameShaped(gold, y)), "The tensors must be of the same size!"); CheckNTErrors((IsSameShaped(gold, y)), "The tensors must be of the same size!");
for (int k = 0; k < blockNum; k++) { for (int k = 0; k < blockNum; k++) {
GDevs.GetCudaThread(x->devID, blockSize, cudaGridSize, cudaBlockSize); GDevs.GetCudaThread(x->devID, blockSize, cudaGridSize, cudaBlockSize);
......
...@@ -23,6 +23,7 @@ ...@@ -23,6 +23,7 @@
#include "Loss.h" #include "Loss.h"
#include "Loss.cuh" #include "Loss.cuh"
#include "../core/getandset/SetData.h" #include "../core/getandset/SetData.h"
#include "../core/shape/IsSameShaped.h"
namespace nts{ // namespace nts(NiuTrans.Tensor) namespace nts{ // namespace nts(NiuTrans.Tensor)
...@@ -48,7 +49,7 @@ DTYPE _LossCompute(XTensor * gold, XTensor * output, LOSS_FUNCTION_NAME LFName, ...@@ -48,7 +49,7 @@ DTYPE _LossCompute(XTensor * gold, XTensor * output, LOSS_FUNCTION_NAME LFName,
DTYPE error = 0.0F; DTYPE error = 0.0F;
if (output->devID < 0) { if (output->devID < 0) {
CheckNTErrors((gLen >= 0 && gLen <= output->unitNum), "Illegal input length!"); CheckNTErrors((gLen >= 0 && gLen <= output->unitNum), "Illegal input length!");
CheckNTErrors((XTensor::IsSameShaped(gold, output)), "The input tensors must be of the same size!"); CheckNTErrors((IsSameShaped(gold, output)), "The input tensors must be of the same size!");
CheckNTErrors((gold->dimSizeRDI[0] == 1 && output->dimSizeRDI[0] == 1), "TODO!"); CheckNTErrors((gold->dimSizeRDI[0] == 1 && output->dimSizeRDI[0] == 1), "TODO!");
CheckNTErrors((gold->order > leadDim && leadDim >= 0), "Illegal leading dimension!"); CheckNTErrors((gold->order > leadDim && leadDim >= 0), "Illegal leading dimension!");
CheckNTErrors((gold->dataType == DEFAULT_DTYPE && output->dataType == DEFAULT_DTYPE), "TODO!"); CheckNTErrors((gold->dataType == DEFAULT_DTYPE && output->dataType == DEFAULT_DTYPE), "TODO!");
...@@ -205,7 +206,7 @@ DTYPE _LossComputeForLogScale(XTensor * gold, XTensor * output, ...@@ -205,7 +206,7 @@ DTYPE _LossComputeForLogScale(XTensor * gold, XTensor * output,
int leadDim, int gBeg, int gLen, int oBeg) int leadDim, int gBeg, int gLen, int oBeg)
{ {
CheckNTErrors(gLen >= 0 && gLen <= output->unitNum, "Illegal input length!"); CheckNTErrors(gLen >= 0 && gLen <= output->unitNum, "Illegal input length!");
CheckNTErrors(XTensor::IsSameShaped(gold, output), "The input tensors must be of the same size!"); CheckNTErrors(IsSameShaped(gold, output), "The input tensors must be of the same size!");
CheckNTErrors(gold->dimSizeRDI[0] == 1 && output->dimSizeRDI[0] == 1, "TODO!"); CheckNTErrors(gold->dimSizeRDI[0] == 1 && output->dimSizeRDI[0] == 1, "TODO!");
CheckNTErrors(gold->order > leadDim && leadDim >= 0, "Illegal leading dimension!"); CheckNTErrors(gold->order > leadDim && leadDim >= 0, "Illegal leading dimension!");
CheckNTErrors(gold->dataType == DEFAULT_DTYPE && output->dataType == DEFAULT_DTYPE, "TODO!"); CheckNTErrors(gold->dataType == DEFAULT_DTYPE && output->dataType == DEFAULT_DTYPE, "TODO!");
...@@ -401,7 +402,7 @@ void _LossBackward(XTensor * dedy, XTensor * t, XTensor * y, ...@@ -401,7 +402,7 @@ void _LossBackward(XTensor * dedy, XTensor * t, XTensor * y,
if (y->devID < 0) { if (y->devID < 0) {
CheckNTErrors(tLen <= y->unitNum, "Illegal input length!"); CheckNTErrors(tLen <= y->unitNum, "Illegal input length!");
CheckNTErrors(XTensor::IsSameShaped(t, y)&& XTensor::IsSameShaped(dedy, y), CheckNTErrors(IsSameShaped(t, y)&& IsSameShaped(dedy, y),
"The input tensors must be of the same size!"); "The input tensors must be of the same size!");
CheckNTErrors((dedy->devID == t->devID) && (dedy->devID == y->devID), CheckNTErrors((dedy->devID == t->devID) && (dedy->devID == y->devID),
"Tensor must be on the same device!"); "Tensor must be on the same device!");
......
...@@ -29,6 +29,7 @@ ...@@ -29,6 +29,7 @@
#include "../core/arithmetic/Multiply.h" #include "../core/arithmetic/Multiply.h"
#include "../core/reduce/ReduceSum.h" #include "../core/reduce/ReduceSum.h"
#include "../core/movement/CopyValues.h" #include "../core/movement/CopyValues.h"
#include "../core/shape/IsSameShaped.h"
namespace nts{ // namespace nts(NiuTrans.Tensor) namespace nts{ // namespace nts(NiuTrans.Tensor)
...@@ -54,7 +55,7 @@ DTYPE _CudaLossCompute(XTensor * gold, XTensor * y, LOSS_FUNCTION_NAME LFName, ...@@ -54,7 +55,7 @@ DTYPE _CudaLossCompute(XTensor * gold, XTensor * y, LOSS_FUNCTION_NAME LFName,
bool isLogOutput, int leadDim, int gBeg, int gLen, int yBeg) bool isLogOutput, int leadDim, int gBeg, int gLen, int yBeg)
{ {
CheckNTErrors((gLen >= 0 && gLen <= y->unitNum), "Illegal input length!"); CheckNTErrors((gLen >= 0 && gLen <= y->unitNum), "Illegal input length!");
CheckNTErrors((XTensor::IsSameShaped(gold, y)), "The input tensors must be of the same size!"); CheckNTErrors((IsSameShaped(gold, y)), "The input tensors must be of the same size!");
CheckNTErrors((gold->dimSizeRDI[0] == 1 && y->dimSizeRDI[0] == 1), "TODO!"); CheckNTErrors((gold->dimSizeRDI[0] == 1 && y->dimSizeRDI[0] == 1), "TODO!");
CheckNTErrors((gold->order > leadDim && leadDim >= 0), "Illegal leading dimension!"); CheckNTErrors((gold->order > leadDim && leadDim >= 0), "Illegal leading dimension!");
CheckNTErrors((gold->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE), "TODO!"); CheckNTErrors((gold->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE), "TODO!");
...@@ -331,7 +332,7 @@ void _CudaLossBackward(XTensor * dedy, XTensor * t, XTensor * y, ...@@ -331,7 +332,7 @@ void _CudaLossBackward(XTensor * dedy, XTensor * t, XTensor * y,
int leadDim, int tBeg, int tLen, int yBeg) int leadDim, int tBeg, int tLen, int yBeg)
{ {
CheckNTErrors((tLen <= y->unitNum), "Illegal input length!"); CheckNTErrors((tLen <= y->unitNum), "Illegal input length!");
CheckNTErrors((XTensor::IsSameShaped(t, y)&& XTensor::IsSameShaped(dedy, y)), CheckNTErrors((IsSameShaped(t, y)&& IsSameShaped(dedy, y)),
"The input tensors must be of the same size!"); "The input tensors must be of the same size!");
CheckNTErrors(((dedy->devID == t->devID) && (dedy->devID == y->devID)), CheckNTErrors(((dedy->devID == t->devID) && (dedy->devID == y->devID)),
"Tensor must be on the same device!"); "Tensor must be on the same device!");
......
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
*/ */
#include "../XName.h" #include "../XName.h"
#include "../core/shape/IsSameShaped.h"
#include "Rectify.h" #include "Rectify.h"
#include "Rectify.cuh" #include "Rectify.cuh"
...@@ -32,7 +33,7 @@ rectify function y = max(0, x) ...@@ -32,7 +33,7 @@ rectify function y = max(0, x)
*/ */
void _Rectify(const XTensor * x, XTensor * y) void _Rectify(const XTensor * x, XTensor * y)
{ {
CheckNTErrors(XTensor::IsSameShaped(x, y), CheckNTErrors(IsSameShaped(x, y),
"The input tensor and output tensor must have the same shape!") "The input tensor and output tensor must have the same shape!")
#ifdef USE_CUDA #ifdef USE_CUDA
...@@ -79,7 +80,7 @@ XTensor Rectify(const XTensor &x) ...@@ -79,7 +80,7 @@ XTensor Rectify(const XTensor &x)
void Rectify(const XTensor &x, XTensor &y) void Rectify(const XTensor &x, XTensor &y)
{ {
if (!y.isInit || !XTensor::IsSameShaped(&y, &x)) { if (!y.isInit || !IsSameShaped(&y, &x)) {
InitTensor(&y, &x); InitTensor(&y, &x);
} }
......
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
*/ */
#include "../XName.h" #include "../XName.h"
#include "../core/shape/IsSameShaped.h"
#include <math.h> #include <math.h>
#include "Sigmoid.h" #include "Sigmoid.h"
#include "Sigmoid.cuh" #include "Sigmoid.cuh"
...@@ -34,7 +35,7 @@ sigmoid function y = 1/(1+exp(-x)) ...@@ -34,7 +35,7 @@ sigmoid function y = 1/(1+exp(-x))
*/ */
void _Sigmoid(const XTensor * x, XTensor * y) void _Sigmoid(const XTensor * x, XTensor * y)
{ {
CheckNTErrors(XTensor::IsSameShaped(x, y), CheckNTErrors(IsSameShaped(x, y),
"The input tensor and output tensor must have the same shape!") "The input tensor and output tensor must have the same shape!")
#ifdef USE_CUDA #ifdef USE_CUDA
...@@ -82,7 +83,7 @@ XTensor Sigmoid(const XTensor &x) ...@@ -82,7 +83,7 @@ XTensor Sigmoid(const XTensor &x)
void Sigmoid(const XTensor &x, XTensor &y) void Sigmoid(const XTensor &x, XTensor &y)
{ {
if (!y.isInit || !XTensor::IsSameShaped(&y, &x)) { if (!y.isInit || !IsSameShaped(&y, &x)) {
InitTensor(&y, &x); InitTensor(&y, &x);
} }
......
...@@ -26,6 +26,7 @@ ...@@ -26,6 +26,7 @@
#include "../XUtility.h" #include "../XUtility.h"
#include "../core/reduce/ReduceSum.h" #include "../core/reduce/ReduceSum.h"
#include "../core/reduce/ReduceMax.h" #include "../core/reduce/ReduceMax.h"
#include "../core/shape/IsSameShaped.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -156,7 +157,7 @@ void Softmax(const XTensor &x, XTensor &y, int leadDim) ...@@ -156,7 +157,7 @@ void Softmax(const XTensor &x, XTensor &y, int leadDim)
if (ld < 0) if (ld < 0)
ld = x.order - 1; ld = x.order - 1;
if (!y.isInit || !XTensor::IsSameShaped(&y, &x)) { if (!y.isInit || !IsSameShaped(&y, &x)) {
InitTensor(&y, &x); InitTensor(&y, &x);
} }
...@@ -252,7 +253,7 @@ void _SoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x, ...@@ -252,7 +253,7 @@ void _SoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
} }
} }
else{ else{
CheckNTErrors((XTensor::IsSameShaped(gold, y)), "The tensors must be of the same size!"); CheckNTErrors((IsSameShaped(gold, y)), "The tensors must be of the same size!");
for(int k = 0; k < blockNum; k++){ for(int k = 0; k < blockNum; k++){
gp = (DTYPE*)gold->data + k * blockSize; gp = (DTYPE*)gold->data + k * blockSize;
op = (DTYPE*)y->data + k * blockSize; op = (DTYPE*)y->data + k * blockSize;
...@@ -291,7 +292,7 @@ void _SoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x, ...@@ -291,7 +292,7 @@ void _SoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
} }
} }
else{ else{
CheckNTErrors((XTensor::IsSameShaped(gold, y)), "The tensors must be of the same size!"); CheckNTErrors((IsSameShaped(gold, y)), "The tensors must be of the same size!");
for(int k = 0; k < blockNum; k++){ for(int k = 0; k < blockNum; k++){
gp = (DTYPE*)gold->data + k * blockSize; gp = (DTYPE*)gold->data + k * blockSize;
op = (DTYPE*)y->data + k * blockSize; op = (DTYPE*)y->data + k * blockSize;
......
...@@ -26,6 +26,7 @@ ...@@ -26,6 +26,7 @@
#include "../core/arithmetic/Multiply.h" #include "../core/arithmetic/Multiply.h"
#include "../core/arithmetic/MultiplyDim.h" #include "../core/arithmetic/MultiplyDim.h"
#include "../core/shape/Unsqueeze.h" #include "../core/shape/Unsqueeze.h"
#include "../core/shape/IsSameShaped.h"
#include "../core/arithmetic/Sum.h" #include "../core/arithmetic/Sum.h"
#include "../XDevice.h" #include "../XDevice.h"
#include "../XUtility.h" #include "../XUtility.h"
...@@ -223,7 +224,7 @@ void _CudaSoftmaxSumMax(const XTensor * x, XTensor * y, int leadDim, XTensor * s ...@@ -223,7 +224,7 @@ void _CudaSoftmaxSumMax(const XTensor * x, XTensor * y, int leadDim, XTensor * s
{ {
CheckNTErrors((x->devID >= 0), "Forward computation of softmax must be run on GPUs."); CheckNTErrors((x->devID >= 0), "Forward computation of softmax must be run on GPUs.");
CheckNTErrors((x->devID == y->devID), "Tensors used in softmax are not on the same GPU."); CheckNTErrors((x->devID == y->devID), "Tensors used in softmax are not on the same GPU.");
CheckNTErrors((XTensor::IsSameShaped(x, y)), "Input tensors must be of the same size!"); CheckNTErrors((IsSameShaped(x, y)), "Input tensors must be of the same size!");
int leadDimRDI = y->order - leadDim - 1; int leadDimRDI = y->order - leadDim - 1;
int dimensionSize = y->dimSizeRDI[leadDimRDI]; int dimensionSize = y->dimSizeRDI[leadDimRDI];
......
...@@ -30,6 +30,7 @@ ...@@ -30,6 +30,7 @@
#include "../core/math/ScaleAndShift.h" #include "../core/math/ScaleAndShift.h"
#include "../core/reduce/ReduceSum.h" #include "../core/reduce/ReduceSum.h"
#include "../core/reduce/ReduceSumAll.h" #include "../core/reduce/ReduceSumAll.h"
#include "../core/shape/IsSameShaped.h"
namespace nts{ // namespace nts(NiuTrans.Tensor) namespace nts{ // namespace nts(NiuTrans.Tensor)
...@@ -54,10 +55,10 @@ void _CrossEntropy(const XTensor * output, const XTensor * gold, ...@@ -54,10 +55,10 @@ void _CrossEntropy(const XTensor * output, const XTensor * gold,
int unitNum = output->dimSize[n]; int unitNum = output->dimSize[n];
CheckNTErrors(n >= 0 && n < output->order, "Wrong leadingDim!"); CheckNTErrors(n >= 0 && n < output->order, "Wrong leadingDim!");
CheckNTErrors(XTensor::IsSameShaped(output, gold), CheckNTErrors(IsSameShaped(output, gold),
"The output tensor and gold tensor must be of the same size!"); "The output tensor and gold tensor must be of the same size!");
CheckNTErrors(weight == NULL || weight->unitNum == unitNum, "Wrong weight tensor!"); CheckNTErrors(weight == NULL || weight->unitNum == unitNum, "Wrong weight tensor!");
CheckNTErrors(padding == NULL || XTensor::IsSameShaped(padding, loss), CheckNTErrors(padding == NULL || IsSameShaped(padding, loss),
"The loss tensor and padding tensor must be same shape!"); "The loss tensor and padding tensor must be same shape!");
CheckNTErrors(loss->order == output->order - 1, "Wrong loss dimension!"); CheckNTErrors(loss->order == output->order - 1, "Wrong loss dimension!");
CheckNTErrors(gold->dataType == DEFAULT_DTYPE && output->dataType == DEFAULT_DTYPE, "TODO!"); CheckNTErrors(gold->dataType == DEFAULT_DTYPE && output->dataType == DEFAULT_DTYPE, "TODO!");
...@@ -101,11 +102,11 @@ void _CrossEntropyFast(const XTensor * output, const XTensor * gold, ...@@ -101,11 +102,11 @@ void _CrossEntropyFast(const XTensor * output, const XTensor * gold,
CheckNTErrors(n >= 0 && n < output->order, CheckNTErrors(n >= 0 && n < output->order,
"Wrong leading dimension!"); "Wrong leading dimension!");
CheckNTErrors(XTensor::IsSameShaped(output, gold), CheckNTErrors(IsSameShaped(output, gold),
"The output tensor and gold tensor must be of the same size!"); "The output tensor and gold tensor must be of the same size!");
CheckNTErrors(weight == NULL || weight->unitNum == leadingDimSize, CheckNTErrors(weight == NULL || weight->unitNum == leadingDimSize,
"Wrong weight tensor!"); "Wrong weight tensor!");
CheckNTErrors(padding == NULL || XTensor::IsSameShaped(padding, loss), CheckNTErrors(padding == NULL || IsSameShaped(padding, loss),
"The loss tensor and padding tensor must be same shape!"); "The loss tensor and padding tensor must be same shape!");
CheckNTErrors(loss->order == output->order - 1, CheckNTErrors(loss->order == output->order - 1,
"Wrong loss dimension!"); "Wrong loss dimension!");
...@@ -337,7 +338,7 @@ DTYPE _CrossEntropy(const XTensor * output, const XTensor * gold, ...@@ -337,7 +338,7 @@ DTYPE _CrossEntropy(const XTensor * output, const XTensor * gold,
int unitNum = output->dimSize[n]; int unitNum = output->dimSize[n];
CheckNTErrors(n >= 0 && n < output->order, "Wrong leadingDim!"); CheckNTErrors(n >= 0 && n < output->order, "Wrong leadingDim!");
CheckNTErrors(XTensor::IsSameShaped(output, gold), CheckNTErrors(IsSameShaped(output, gold),
"The output tensor and gold tensor must be of the same size!"); "The output tensor and gold tensor must be of the same size!");
CheckNTErrors(weight == NULL || weight->unitNum == unitNum, "Wrong weight tensor!"); CheckNTErrors(weight == NULL || weight->unitNum == unitNum, "Wrong weight tensor!");
CheckNTErrors(padding == NULL || padding->order == output->order - 1, CheckNTErrors(padding == NULL || padding->order == output->order - 1,
...@@ -412,7 +413,7 @@ DTYPE _CrossEntropyFast(const XTensor * output, const XTensor * gold, ...@@ -412,7 +413,7 @@ DTYPE _CrossEntropyFast(const XTensor * output, const XTensor * gold,
CheckNTErrors(n >= 0 && n < output->order, CheckNTErrors(n >= 0 && n < output->order,
"Wrong leadingDim!"); "Wrong leadingDim!");
CheckNTErrors(XTensor::IsSameShaped(output, gold), CheckNTErrors(IsSameShaped(output, gold),
"The output tensor and gold tensor must be of the same size!"); "The output tensor and gold tensor must be of the same size!");
CheckNTErrors(weight == NULL || weight->unitNum == leadingDimSize, CheckNTErrors(weight == NULL || weight->unitNum == leadingDimSize,
"Wrong weight tensor!"); "Wrong weight tensor!");
...@@ -564,7 +565,7 @@ void _CrossEntropyBackward(XTensor * dedy, const XTensor * output, ...@@ -564,7 +565,7 @@ void _CrossEntropyBackward(XTensor * dedy, const XTensor * output,
CheckNTErrors(n >= 0 && n < output->order, CheckNTErrors(n >= 0 && n < output->order,
"Wrong leading dimension!"); "Wrong leading dimension!");
CheckNTErrors(XTensor::IsSameShaped(dedy, output, gold), CheckNTErrors(IsSameShaped(dedy, output, gold),
"The output tensor and gold tensor must be of the same size!"); "The output tensor and gold tensor must be of the same size!");
CheckNTErrors(weight == NULL || weight->unitNum == leadingDimSize, CheckNTErrors(weight == NULL || weight->unitNum == leadingDimSize,
"Wrong weight tensor!"); "Wrong weight tensor!");
......
...@@ -35,6 +35,7 @@ ...@@ -35,6 +35,7 @@
#include "../core/reduce/ReduceSumAll.h" #include "../core/reduce/ReduceSumAll.h"
#include "../core/shape/Transpose.h" #include "../core/shape/Transpose.h"
#include "../core/shape/Unsqueeze.h" #include "../core/shape/Unsqueeze.h"
#include "../core/shape/IsSameShaped.h"
namespace nts{ // namespace nts(NiuTrans.Tensor) namespace nts{ // namespace nts(NiuTrans.Tensor)
...@@ -100,7 +101,7 @@ DTYPE _CudaCrossEntropyFast(const XTensor * output, const XTensor * gold, ...@@ -100,7 +101,7 @@ DTYPE _CudaCrossEntropyFast(const XTensor * output, const XTensor * gold,
CheckNTErrors(n >= 0 && n < output->order, CheckNTErrors(n >= 0 && n < output->order,
"Wrong leadingDim!"); "Wrong leadingDim!");
CheckNTErrors(XTensor::IsSameShaped(output, gold), CheckNTErrors(IsSameShaped(output, gold),
"The output tensor and gold tensor must be of the same size!"); "The output tensor and gold tensor must be of the same size!");
CheckNTErrors(weight == NULL || weight->unitNum == leadingDimSize, CheckNTErrors(weight == NULL || weight->unitNum == leadingDimSize,
"Wrong weight tensor!"); "Wrong weight tensor!");
......
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
*/ */
#include "../core/math/Unary.h" #include "../core/math/Unary.h"
#include "../core/utilities/CheckData.h"
#include "TAbsolute.h" #include "TAbsolute.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -66,7 +67,7 @@ bool TestAbsolute1() ...@@ -66,7 +67,7 @@ bool TestAbsolute1()
bUser = Absolute(*a); bUser = Absolute(*a);
/* check results */ /* check results */
cpuTest = b->CheckData(answer, unitNum, 1e-4F) && aMe->CheckData(answer, unitNum, 1e-4F) && bUser.CheckData(answer, unitNum, 1e-4F); cpuTest = CheckData(b, answer, unitNum, 1e-4F) && CheckData(aMe, answer, unitNum, 1e-4F) && CheckData(&bUser, answer, unitNum, 1e-4F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -88,7 +89,7 @@ bool TestAbsolute1() ...@@ -88,7 +89,7 @@ bool TestAbsolute1()
bUserGPU = Absolute(*aGPU); bUserGPU = Absolute(*aGPU);
/* check results */ /* check results */
gpuTest = bGPU->CheckData(answer, unitNum, 1e-4F) && aMeGPU->CheckData(answer, unitNum, 1e-4F) && bUserGPU.CheckData(answer, unitNum, 1e-4F); gpuTest = CheckData(bGPU, answer, unitNum, 1e-4F) && CheckData(aMeGPU, answer, unitNum, 1e-4F) && CheckData(&bUserGPU, answer, unitNum, 1e-4F);
/* destroy variables */ /* destroy variables */
delete a; delete a;
......
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include "../XTensor.h" #include "../XTensor.h"
#include "../core/math/Clip.h" #include "../core/math/Clip.h"
#include "../core/utilities/CheckData.h"
#include "TClip.h" #include "TClip.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -67,9 +68,9 @@ bool TestClip1() ...@@ -67,9 +68,9 @@ bool TestClip1()
bUser = Clip(*a, -1.0, 1.0); bUser = Clip(*a, -1.0, 1.0);
/* check results */ /* check results */
cpuTest = b->CheckData(answer, aUnitNum, 1e-4F) && cpuTest = CheckData(b, answer, aUnitNum, 1e-4F) &&
aMe->CheckData(answer, aUnitNum, 1e-4F) && CheckData(aMe, answer, aUnitNum, 1e-4F) &&
bUser.CheckData(answer, aUnitNum, 1e-4F); CheckData(&bUser, answer, aUnitNum, 1e-4F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -91,9 +92,9 @@ bool TestClip1() ...@@ -91,9 +92,9 @@ bool TestClip1()
bUserGPU = Clip(*aGPU, -1.0, 1.0); bUserGPU = Clip(*aGPU, -1.0, 1.0);
/* check results */ /* check results */
gpuTest = bGPU->CheckData(answer, aUnitNum, 1e-4F) && gpuTest = CheckData(bGPU, answer, aUnitNum, 1e-4F) &&
aMeGPU->CheckData(answer, aUnitNum, 1e-4F) && CheckData(aMeGPU, answer, aUnitNum, 1e-4F) &&
bUserGPU.CheckData(answer, aUnitNum, 1e-4F); CheckData(&bUserGPU, answer, aUnitNum, 1e-4F);
/* destroy variables */ /* destroy variables */
delete a; delete a;
......
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include "../XTensor.h" #include "../XTensor.h"
#include "../core/math/Compare.h" #include "../core/math/Compare.h"
#include "../core/utilities/CheckData.h"
#include "TCompare.h" #include "TCompare.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -67,9 +68,9 @@ bool TestCompare1() ...@@ -67,9 +68,9 @@ bool TestCompare1()
bUser = Equal(*a, 1.0); bUser = Equal(*a, 1.0);
/* check results */ /* check results */
cpuTest = b->CheckData(answer, aUnitNum, 1e-4F) && cpuTest = CheckData(b, answer, aUnitNum, 1e-4F) &&
aMe->CheckData(answer, aUnitNum, 1e-4F) && CheckData(aMe, answer, aUnitNum, 1e-4F) &&
bUser.CheckData(answer, aUnitNum, 1e-4F); CheckData(&bUser, answer, aUnitNum, 1e-4F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -91,9 +92,9 @@ bool TestCompare1() ...@@ -91,9 +92,9 @@ bool TestCompare1()
bUserGPU = Equal(*aGPU, 1.0); bUserGPU = Equal(*aGPU, 1.0);
/* check results */ /* check results */
gpuTest = bGPU->CheckData(answer, aUnitNum, 1e-4F) && gpuTest = CheckData(bGPU, answer, aUnitNum, 1e-4F) &&
aMeGPU->CheckData(answer, aUnitNum, 1e-4F) && CheckData(aMeGPU, answer, aUnitNum, 1e-4F) &&
bUserGPU.CheckData(answer, aUnitNum, 1e-4F); CheckData(&bUserGPU, answer, aUnitNum, 1e-4F);
/* destroy variables */ /* destroy variables */
delete a; delete a;
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
* $Created by: Lin Ye (email: linye2015@outlook.com) 2018-06-14 * $Created by: Lin Ye (email: linye2015@outlook.com) 2018-06-14
*/ */
#include "../core/utilities/CheckData.h"
#include "TConcatenate.h" #include "TConcatenate.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -92,7 +93,7 @@ bool TestConcatenate1() ...@@ -92,7 +93,7 @@ bool TestConcatenate1()
tUser = Concatenate(*sList, 1); tUser = Concatenate(*sList, 1);
/* check results */ /* check results */
cpuTest = t->CheckData(answer, tUnitNum) && tUser.CheckData(answer, tUnitNum); cpuTest = CheckData(t, answer, tUnitNum) && CheckData(&tUser, answer, tUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -121,7 +122,7 @@ bool TestConcatenate1() ...@@ -121,7 +122,7 @@ bool TestConcatenate1()
tUserGPU = Concatenate(*sList, 1); tUserGPU = Concatenate(*sList, 1);
/* check results */ /* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum) && tUserGPU.CheckData(answer, tUnitNum); gpuTest = CheckData(tGPU, answer, tUnitNum) && CheckData(&tUserGPU, answer, tUnitNum);
/* destroy variables */ /* destroy variables */
delete sList; delete sList;
...@@ -221,7 +222,7 @@ bool TestConcatenate2() ...@@ -221,7 +222,7 @@ bool TestConcatenate2()
tUser = Concatenate(*sList, 0); tUser = Concatenate(*sList, 0);
/* check results */ /* check results */
cpuTest = t->CheckData(answer, tUnitNum) && tUser.CheckData(answer, tUnitNum); cpuTest = CheckData(t, answer, tUnitNum) && CheckData(&tUser, answer, tUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -250,7 +251,7 @@ bool TestConcatenate2() ...@@ -250,7 +251,7 @@ bool TestConcatenate2()
tUserGPU = Concatenate(*sList, 0); tUserGPU = Concatenate(*sList, 0);
/* check results */ /* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum) && tUserGPU.CheckData(answer, tUnitNum); gpuTest = CheckData(tGPU, answer, tUnitNum) && CheckData(&tUserGPU, answer, tUnitNum);
/* destroy variables */ /* destroy variables */
delete sList; delete sList;
...@@ -348,7 +349,7 @@ bool TestConcatenate3() ...@@ -348,7 +349,7 @@ bool TestConcatenate3()
tUser = Concatenate(*sList, 1); tUser = Concatenate(*sList, 1);
/* check results */ /* check results */
cpuTest = t->CheckData(answer, tUnitNum) && tUser.CheckData(answer, tUnitNum); cpuTest = CheckData(t, answer, tUnitNum) && CheckData(&tUser, answer, tUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -377,7 +378,7 @@ bool TestConcatenate3() ...@@ -377,7 +378,7 @@ bool TestConcatenate3()
tUserGPU = Concatenate(*sList, 1); tUserGPU = Concatenate(*sList, 1);
/* check results */ /* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum) && tUserGPU.CheckData(answer, tUnitNum); gpuTest = CheckData(tGPU, answer, tUnitNum) && CheckData(&tUserGPU, answer, tUnitNum);
/* destroy variables */ /* destroy variables */
delete sList; delete sList;
...@@ -468,7 +469,7 @@ bool TestConcatenate4() ...@@ -468,7 +469,7 @@ bool TestConcatenate4()
tUser = Concatenate(*s1, *s2, 1); tUser = Concatenate(*s1, *s2, 1);
/* check results */ /* check results */
cpuTest = t->CheckData(answer, tUnitNum) && tUser.CheckData(answer, tUnitNum); cpuTest = CheckData(t, answer, tUnitNum) && CheckData(&tUser, answer, tUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -490,7 +491,7 @@ bool TestConcatenate4() ...@@ -490,7 +491,7 @@ bool TestConcatenate4()
tUserGPU = Concatenate(*sGPU1, *sGPU2, 1); tUserGPU = Concatenate(*sGPU1, *sGPU2, 1);
/* check results */ /* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum) && tUserGPU.CheckData(answer, tUnitNum); gpuTest = CheckData(tGPU, answer, tUnitNum) && CheckData(&tUserGPU, answer, tUnitNum);
/* destroy variables */ /* destroy variables */
delete s1; delete s1;
......
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
*/ */
#include "../XList.h" #include "../XList.h"
#include "../core/utilities/CheckData.h"
#include "TConcatenateSolely.h" #include "TConcatenateSolely.h"
namespace nts { // namespace nt(NiuTrans.Tensor) namespace nts { // namespace nt(NiuTrans.Tensor)
...@@ -91,7 +92,7 @@ bool TestConcatenateSolely1() ...@@ -91,7 +92,7 @@ bool TestConcatenateSolely1()
_ConcatenateSolely(sList, t, 1); _ConcatenateSolely(sList, t, 1);
/* check results */ /* check results */
cpuTest = t->CheckData(answer, tUnitNum); cpuTest = CheckData(t, answer, tUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -118,7 +119,7 @@ bool TestConcatenateSolely1() ...@@ -118,7 +119,7 @@ bool TestConcatenateSolely1()
_ConcatenateSolely(sList, tGPU, 1); _ConcatenateSolely(sList, tGPU, 1);
/* check results */ /* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum); gpuTest = CheckData(tGPU, answer, tUnitNum);
/* destroy variables */ /* destroy variables */
delete sList; delete sList;
...@@ -216,7 +217,7 @@ bool TestConcatenateSolely2() ...@@ -216,7 +217,7 @@ bool TestConcatenateSolely2()
_ConcatenateSolely(sList, t, 0); _ConcatenateSolely(sList, t, 0);
/* check results */ /* check results */
cpuTest = t->CheckData(answer, tUnitNum); cpuTest = CheckData(t, answer, tUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -243,7 +244,7 @@ bool TestConcatenateSolely2() ...@@ -243,7 +244,7 @@ bool TestConcatenateSolely2()
_ConcatenateSolely(sList, tGPU, 0); _ConcatenateSolely(sList, tGPU, 0);
/* check results */ /* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum); gpuTest = CheckData(tGPU, answer, tUnitNum);
/* destroy variables */ /* destroy variables */
delete sList; delete sList;
...@@ -339,7 +340,7 @@ bool TestConcatenateSolely3() ...@@ -339,7 +340,7 @@ bool TestConcatenateSolely3()
_ConcatenateSolely(sList, t, 1); _ConcatenateSolely(sList, t, 1);
/* check results */ /* check results */
cpuTest = t->CheckData(answer, tUnitNum); cpuTest = CheckData(t, answer, tUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -366,7 +367,7 @@ bool TestConcatenateSolely3() ...@@ -366,7 +367,7 @@ bool TestConcatenateSolely3()
_ConcatenateSolely(sList, tGPU, 1); _ConcatenateSolely(sList, tGPU, 1);
/* check results */ /* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum); gpuTest = CheckData(tGPU, answer, tUnitNum);
/* destroy variables */ /* destroy variables */
delete sList; delete sList;
......
...@@ -19,8 +19,9 @@ ...@@ -19,8 +19,9 @@
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-07-12 * $Created by: Xu Chen (email: hello_master1954@163.com) 2018-07-12
*/ */
#include "TConvertDataType.h"
#include "../core/arithmetic/MatrixMul.h" #include "../core/arithmetic/MatrixMul.h"
#include "../core/utilities/CheckData.h"
#include "TConvertDataType.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -63,7 +64,7 @@ bool TestConvertDataType1() ...@@ -63,7 +64,7 @@ bool TestConvertDataType1()
_ConvertDataType(a, b); _ConvertDataType(a, b);
/* check results */ /* check results */
cpuTest = b->CheckData(answer, aUnitNum); cpuTest = CheckData(b, answer, aUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -80,7 +81,7 @@ bool TestConvertDataType1() ...@@ -80,7 +81,7 @@ bool TestConvertDataType1()
_ConvertDataType(aGPU, bGPU); _ConvertDataType(aGPU, bGPU);
/* check results */ /* check results */
gpuTest = bGPU->CheckData(answer, aUnitNum); gpuTest = CheckData(bGPU, answer, aUnitNum);
/* destroy variables */ /* destroy variables */
delete a; delete a;
...@@ -138,7 +139,7 @@ bool TestConvertDataType2() ...@@ -138,7 +139,7 @@ bool TestConvertDataType2()
_ConvertDataType(a, b); _ConvertDataType(a, b);
/* check results */ /* check results */
cpuTest = b->CheckData(answer, aUnitNum, 1e-4F); cpuTest = CheckData(b, answer, aUnitNum, 1e-4F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -155,7 +156,7 @@ bool TestConvertDataType2() ...@@ -155,7 +156,7 @@ bool TestConvertDataType2()
_ConvertDataType(aGPU, bGPU); _ConvertDataType(aGPU, bGPU);
/* check results */ /* check results */
gpuTest = bGPU->CheckData(answer, aUnitNum, 1e-4F); gpuTest = CheckData(bGPU, answer, aUnitNum, 1e-4F);
/* destroy variables */ /* destroy variables */
delete a; delete a;
...@@ -237,7 +238,7 @@ bool TestConvertDataType3() ...@@ -237,7 +238,7 @@ bool TestConvertDataType3()
//_ConvertDataType(b, c); //_ConvertDataType(b, c);
/* check results */ /* check results */
//cpuTest = a->CheckData(data1, unitNum1, 1e-4F); //cpuTest = CheckData(a, data1, unitNum1, 1e-4F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -263,7 +264,7 @@ bool TestConvertDataType3() ...@@ -263,7 +264,7 @@ bool TestConvertDataType3()
_ConvertDataType(eGPU, fGPU); _ConvertDataType(eGPU, fGPU);
/* check results */ /* check results */
gpuTest = fGPU->CheckData(answer, unitNum3, 1e-4F); gpuTest = CheckData(fGPU, answer, unitNum3, 1e-4F);
/* destroy variables */ /* destroy variables */
delete a; delete a;
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27 * $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27
*/ */
#include "../core/utilities/CheckData.h"
#include "TCopyIndexed.h" #include "TCopyIndexed.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -104,9 +105,9 @@ bool TestCopyIndexed1() ...@@ -104,9 +105,9 @@ bool TestCopyIndexed1()
tUser = CopyIndexed(*s, dim, *sIndex, *tIndex, copyNum); tUser = CopyIndexed(*s, dim, *sIndex, *tIndex, copyNum);
/* check results */ /* check results */
cpuTest = t1->CheckData(answer, tUnitNum) && cpuTest = CheckData(t1, answer, tUnitNum) &&
t2->CheckData(answer, tUnitNum) && CheckData(t2, answer, tUnitNum) &&
tUser.CheckData(answer, tUnitNum); CheckData(&tUser, answer, tUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -133,9 +134,9 @@ bool TestCopyIndexed1() ...@@ -133,9 +134,9 @@ bool TestCopyIndexed1()
tUserGPU = CopyIndexed(*sGPU, dim, *sIndexGPU, *tIndexGPU, copyNum); tUserGPU = CopyIndexed(*sGPU, dim, *sIndexGPU, *tIndexGPU, copyNum);
/* check results */ /* check results */
gpuTest = tGPU1->CheckData(answer, tUnitNum) && gpuTest = CheckData(tGPU1, answer, tUnitNum) &&
tGPU2->CheckData(answer, tUnitNum) && CheckData(tGPU2, answer, tUnitNum) &&
tUserGPU.CheckData(answer, tUnitNum); CheckData(&tUserGPU, answer, tUnitNum);
/* destroy variables */ /* destroy variables */
delete s; delete s;
...@@ -249,9 +250,9 @@ bool TestCopyIndexed2() ...@@ -249,9 +250,9 @@ bool TestCopyIndexed2()
tUser = CopyIndexed(*s, dim, *sIndex, *tIndex); tUser = CopyIndexed(*s, dim, *sIndex, *tIndex);
/* check results */ /* check results */
cpuTest = t1->CheckData(answer, tUnitNum) && cpuTest = CheckData(t1, answer, tUnitNum) &&
t2->CheckData(answer, tUnitNum) && CheckData(t2, answer, tUnitNum) &&
tUser.CheckData(answer, tUnitNum); CheckData(&tUser, answer, tUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -278,9 +279,9 @@ bool TestCopyIndexed2() ...@@ -278,9 +279,9 @@ bool TestCopyIndexed2()
tUserGPU = CopyIndexed(*sGPU, dim, *sIndexGPU, *tIndexGPU, copyNum); tUserGPU = CopyIndexed(*sGPU, dim, *sIndexGPU, *tIndexGPU, copyNum);
/* check results */ /* check results */
gpuTest = tGPU1->CheckData(answer, tUnitNum) && gpuTest = CheckData(tGPU1, answer, tUnitNum) &&
tGPU2->CheckData(answer, tUnitNum) && CheckData(tGPU2, answer, tUnitNum) &&
tUserGPU.CheckData(answer, tUnitNum); CheckData(&tUserGPU, answer, tUnitNum);
/* destroy variables */ /* destroy variables */
delete s; delete s;
...@@ -394,9 +395,9 @@ bool TestCopyIndexed3() ...@@ -394,9 +395,9 @@ bool TestCopyIndexed3()
tUser = CopyIndexed(*s, dim, *sIndex, *tIndex, copyNum); tUser = CopyIndexed(*s, dim, *sIndex, *tIndex, copyNum);
/* check results */ /* check results */
cpuTest = t1->CheckData(answer, tUnitNum) && cpuTest = CheckData(t1, answer, tUnitNum) &&
t2->CheckData(answer, tUnitNum) && CheckData(t2, answer, tUnitNum) &&
tUser.CheckData(answer, tUnitNum); CheckData(&tUser, answer, tUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -423,9 +424,9 @@ bool TestCopyIndexed3() ...@@ -423,9 +424,9 @@ bool TestCopyIndexed3()
tUserGPU = CopyIndexed(*sGPU, dim, *sIndexGPU, *tIndexGPU, copyNum); tUserGPU = CopyIndexed(*sGPU, dim, *sIndexGPU, *tIndexGPU, copyNum);
/* check results */ /* check results */
gpuTest = tGPU1->CheckData(answer, tUnitNum) && gpuTest = CheckData(tGPU1, answer, tUnitNum) &&
tGPU2->CheckData(answer, tUnitNum) && CheckData(tGPU2, answer, tUnitNum) &&
tUserGPU.CheckData(answer, tUnitNum); CheckData(&tUserGPU, answer, tUnitNum);
/* destroy variables */ /* destroy variables */
delete s; delete s;
...@@ -539,9 +540,9 @@ bool TestCopyIndexed4() ...@@ -539,9 +540,9 @@ bool TestCopyIndexed4()
tUser = CopyIndexed(*s, dim, *sIndex, *tIndex, copyNum); tUser = CopyIndexed(*s, dim, *sIndex, *tIndex, copyNum);
/* check results */ /* check results */
cpuTest = t1->CheckData(answer, tUnitNum) && cpuTest = CheckData(t1, answer, tUnitNum) &&
t2->CheckData(answer, tUnitNum) && CheckData(t2, answer, tUnitNum) &&
tUser.CheckData(answer, tUnitNum); CheckData(&tUser, answer, tUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -568,9 +569,9 @@ bool TestCopyIndexed4() ...@@ -568,9 +569,9 @@ bool TestCopyIndexed4()
tUserGPU = CopyIndexed(*sGPU, dim, *sIndexGPU, *tIndexGPU, copyNum); tUserGPU = CopyIndexed(*sGPU, dim, *sIndexGPU, *tIndexGPU, copyNum);
/* check results */ /* check results */
gpuTest = tGPU1->CheckData(answer, tUnitNum) && gpuTest = CheckData(tGPU1, answer, tUnitNum) &&
tGPU2->CheckData(answer, tUnitNum) && CheckData(tGPU2, answer, tUnitNum) &&
tUserGPU.CheckData(answer, tUnitNum); CheckData(&tUserGPU, answer, tUnitNum);
/* destroy variables */ /* destroy variables */
delete s; delete s;
...@@ -684,9 +685,9 @@ bool TestCopyIndexed5() ...@@ -684,9 +685,9 @@ bool TestCopyIndexed5()
tUser = CopyIndexed(*s, dim, *sIndex, *tIndex, copyNum); tUser = CopyIndexed(*s, dim, *sIndex, *tIndex, copyNum);
/* check results */ /* check results */
cpuTest = t1->CheckData(answer, tUnitNum) && cpuTest = CheckData(t1, answer, tUnitNum) &&
t2->CheckData(answer, tUnitNum) && CheckData(t2, answer, tUnitNum) &&
tUser.CheckData(answer, tUnitNum); CheckData(&tUser, answer, tUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -713,9 +714,9 @@ bool TestCopyIndexed5() ...@@ -713,9 +714,9 @@ bool TestCopyIndexed5()
tUserGPU = CopyIndexed(*sGPU, dim, *sIndexGPU, *tIndexGPU, copyNum); tUserGPU = CopyIndexed(*sGPU, dim, *sIndexGPU, *tIndexGPU, copyNum);
/* check results */ /* check results */
gpuTest = tGPU1->CheckData(answer, tUnitNum) && gpuTest = CheckData(tGPU1, answer, tUnitNum) &&
tGPU2->CheckData(answer, tUnitNum) && CheckData(tGPU2, answer, tUnitNum) &&
tUserGPU.CheckData(answer, tUnitNum); CheckData(&tUserGPU, answer, tUnitNum);
/* destroy variables */ /* destroy variables */
delete s; delete s;
......
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
*/ */
#include "../XUtility.h" #include "../XUtility.h"
#include "../core/utilities/CheckData.h"
#include "TCopyValues.h" #include "TCopyValues.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -57,7 +58,7 @@ bool TestCopyValues1() ...@@ -57,7 +58,7 @@ bool TestCopyValues1()
tUser = CopyValues(*s); tUser = CopyValues(*s);
/* check results */ /* check results */
cpuTest = t->CheckData(sData, sUnitNum) && tUser.CheckData(sData, sUnitNum); cpuTest = CheckData(t, sData, sUnitNum) && CheckData(&tUser, sData, sUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -77,7 +78,7 @@ bool TestCopyValues1() ...@@ -77,7 +78,7 @@ bool TestCopyValues1()
tUserGPU = CopyValues(*sGPU); tUserGPU = CopyValues(*sGPU);
/* check results */ /* check results */
gpuTest = tGPU->CheckData(sData, sUnitNum) && tUser.CheckData(sData, sUnitNum); gpuTest = CheckData(tGPU, sData, sUnitNum) && CheckData(&tUser, sData, sUnitNum);
/* destroy variables */ /* destroy variables */
delete s; delete s;
......
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
*/ */
#include "../core/math/Unary.h" #include "../core/math/Unary.h"
#include "../core/utilities/CheckData.h"
#include "TCos.h" #include "TCos.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -66,7 +67,7 @@ bool TestCos1() ...@@ -66,7 +67,7 @@ bool TestCos1()
bUser = Cos(*a); bUser = Cos(*a);
/* check results */ /* check results */
cpuTest = b->CheckData(answer, unitNum, 1e-4F) && aMe->CheckData(answer, unitNum, 1e-4F) && bUser.CheckData(answer, unitNum, 1e-4F); cpuTest = CheckData(b, answer, unitNum, 1e-4F) && CheckData(aMe, answer, unitNum, 1e-4F) && CheckData(&bUser, answer, unitNum, 1e-4F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -88,7 +89,7 @@ bool TestCos1() ...@@ -88,7 +89,7 @@ bool TestCos1()
bUserGPU = Cos(*aGPU); bUserGPU = Cos(*aGPU);
/* check results */ /* check results */
gpuTest = bGPU->CheckData(answer, unitNum, 1e-4F) && aMeGPU->CheckData(answer, unitNum, 1e-4F) && bUserGPU.CheckData(answer, unitNum, 1e-4F); gpuTest = CheckData(bGPU, answer, unitNum, 1e-4F) && CheckData(aMeGPU, answer, unitNum, 1e-4F) && CheckData(&bUserGPU, answer, unitNum, 1e-4F);
/* destroy variables */ /* destroy variables */
delete a; delete a;
......
...@@ -20,9 +20,10 @@ ...@@ -20,9 +20,10 @@
*/ */
#include <math.h> #include <math.h>
#include "TCrossEntropy.h" #include "../core/utilities/CheckData.h"
#include "../loss/CrossEntropy.h" #include "../loss/CrossEntropy.h"
#include "../core/math/ScaleAndShift.h" #include "../core/math/ScaleAndShift.h"
#include "TCrossEntropy.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -276,7 +277,7 @@ bool TestCrossEntropy3() ...@@ -276,7 +277,7 @@ bool TestCrossEntropy3()
_CrossEntropyFast(output, gold, loss, weight); _CrossEntropyFast(output, gold, loss, weight);
/* check results */ /* check results */
cpuTest = loss->CheckData(answer, 4, 1e-4F); cpuTest = CheckData(loss, answer, 4, 1e-4F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -301,7 +302,7 @@ bool TestCrossEntropy3() ...@@ -301,7 +302,7 @@ bool TestCrossEntropy3()
_CrossEntropyFast(outputGPU, goldGPU, lossGPU, weightGPU); _CrossEntropyFast(outputGPU, goldGPU, lossGPU, weightGPU);
/* check results */ /* check results */
gpuTest = lossGPU->CheckData(answer, 4, 1e-4F); gpuTest = CheckData(lossGPU, answer, 4, 1e-4F);
/* destroy variables */ /* destroy variables */
delete output; delete output;
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-08-01 * $Created by: Xu Chen (email: hello_master1954@163.com) 2018-08-01
*/ */
#include "../core/utilities/CheckData.h"
#include "TDiv.h" #include "TDiv.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -89,9 +90,9 @@ bool TestDiv1() ...@@ -89,9 +90,9 @@ bool TestDiv1()
tUser = Div(*s1, *s2, 0); tUser = Div(*s1, *s2, 0);
/* check results */ /* check results */
cpuTest = t->CheckData(answer, tUnitNum, 1e-4F) && cpuTest = CheckData(t, answer, tUnitNum, 1e-4F) &&
tMe->CheckData(answer, tUnitNum, 1e-4F) && CheckData(tMe, answer, tUnitNum, 1e-4F) &&
tUser.CheckData(answer, tUnitNum, 1e-4F); CheckData(&tUser, answer, tUnitNum, 1e-4F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -116,9 +117,9 @@ bool TestDiv1() ...@@ -116,9 +117,9 @@ bool TestDiv1()
tUserGPU = Div(*sGPU1, *sGPU2, 0); tUserGPU = Div(*sGPU1, *sGPU2, 0);
/* check results */ /* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum, 1e-4F) && gpuTest = CheckData(tGPU, answer, tUnitNum, 1e-4F) &&
tMeGPU->CheckData(answer, tUnitNum, 1e-4F) && CheckData(tMeGPU, answer, tUnitNum, 1e-4F) &&
tUserGPU.CheckData(answer, tUnitNum, 1e-4F); CheckData(&tUserGPU, answer, tUnitNum, 1e-4F);
/* destroy variables */ /* destroy variables */
delete s1; delete s1;
......
...@@ -19,9 +19,10 @@ ...@@ -19,9 +19,10 @@
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-08-14 * $Created by: Xu Chen (email: hello_master1954@163.com) 2018-08-14
*/ */
#include "TDivDim.h"
#include "../core/arithmetic/DivDim.h"
#include "../XTensor.h" #include "../XTensor.h"
#include "../core/arithmetic/DivDim.h"
#include "../core/utilities/CheckData.h"
#include "TDivDim.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -80,9 +81,9 @@ bool TestDivDim1() ...@@ -80,9 +81,9 @@ bool TestDivDim1()
cUser = DivDim(*a, *b, 0); cUser = DivDim(*a, *b, 0);
/* check results */ /* check results */
cpuTest = c->CheckData(answer, aUnitNum) && cpuTest = CheckData(c, answer, aUnitNum) &&
cMe->CheckData(answer, aUnitNum) && CheckData(cMe, answer, aUnitNum) &&
cUser.CheckData(answer, aUnitNum); CheckData(&cUser, answer, aUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -107,9 +108,9 @@ bool TestDivDim1() ...@@ -107,9 +108,9 @@ bool TestDivDim1()
cUserGPU = DivDim(*aGPU, *bGPU, 0); cUserGPU = DivDim(*aGPU, *bGPU, 0);
/* check results */ /* check results */
gpuTest = cGPU->CheckData(answer, aUnitNum) && gpuTest = CheckData(cGPU, answer, aUnitNum) &&
cMeGPU->CheckData(answer, aUnitNum) && CheckData(cMeGPU, answer, aUnitNum) &&
cUserGPU.CheckData(answer, aUnitNum); CheckData(&cUserGPU, answer, aUnitNum);
/* destroy variables */ /* destroy variables */
delete a; delete a;
...@@ -194,9 +195,9 @@ bool TestDivDim2() ...@@ -194,9 +195,9 @@ bool TestDivDim2()
cUser = DivDim(*a, *b, 1); cUser = DivDim(*a, *b, 1);
/* check results */ /* check results */
cpuTest = c->CheckData(answer, aUnitNum) && cpuTest = CheckData(c, answer, aUnitNum) &&
cMe->CheckData(answer, aUnitNum) && CheckData(cMe, answer, aUnitNum) &&
cUser.CheckData(answer, aUnitNum); CheckData(&cUser, answer, aUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -221,9 +222,9 @@ bool TestDivDim2() ...@@ -221,9 +222,9 @@ bool TestDivDim2()
cUserGPU = DivDim(*aGPU, *bGPU, 1); cUserGPU = DivDim(*aGPU, *bGPU, 1);
/* check results */ /* check results */
gpuTest = cGPU->CheckData(answer, aUnitNum) && gpuTest = CheckData(cGPU, answer, aUnitNum) &&
cMeGPU->CheckData(answer, aUnitNum) && CheckData(cMeGPU, answer, aUnitNum) &&
cUserGPU.CheckData(answer, aUnitNum); CheckData(&cUserGPU, answer, aUnitNum);
/* destroy variables */ /* destroy variables */
delete a; delete a;
......
...@@ -20,8 +20,8 @@ ...@@ -20,8 +20,8 @@
*/ */
#include "../XUtility.h" #include "../XUtility.h"
#include "TDropout.h"
#include "../core/getandset/SetData.h" #include "../core/getandset/SetData.h"
#include "TDropout.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
......
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
*/ */
#include "../core/math/Unary.h" #include "../core/math/Unary.h"
#include "../core/utilities/CheckData.h"
#include "TExp.h" #include "TExp.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -66,9 +67,9 @@ bool TestExp1() ...@@ -66,9 +67,9 @@ bool TestExp1()
bUser = Exp(*a); bUser = Exp(*a);
/* check results */ /* check results */
cpuTest = b->CheckData(answer, unitNum, 1e-4F) && cpuTest = CheckData(b, answer, unitNum, 1e-4F) &&
aMe->CheckData(answer, unitNum, 1e-4F) && CheckData(aMe, answer, unitNum, 1e-4F) &&
bUser.CheckData(answer, unitNum, 1e-4F); CheckData(&bUser, answer, unitNum, 1e-4F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -90,9 +91,9 @@ bool TestExp1() ...@@ -90,9 +91,9 @@ bool TestExp1()
bUserGPU = Exp(*aGPU); bUserGPU = Exp(*aGPU);
/* check results */ /* check results */
gpuTest = bGPU->CheckData(answer, unitNum, 1e-4F) && gpuTest = CheckData(bGPU, answer, unitNum, 1e-4F) &&
aMeGPU->CheckData(answer, unitNum, 1e-4F) && \ CheckData(aMeGPU, answer, unitNum, 1e-4F) && \
bUserGPU.CheckData(answer, unitNum, 1e-4F); CheckData(&bUserGPU, answer, unitNum, 1e-4F);
/* destroy variables */ /* destroy variables */
delete a; delete a;
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-09-18 * $Created by: Xu Chen (email: hello_master1954@163.com) 2018-09-18
*/ */
#include "../core/utilities/CheckData.h"
#include "TGather.h" #include "TGather.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -89,8 +90,8 @@ bool TestGather1() ...@@ -89,8 +90,8 @@ bool TestGather1()
tUser = Gather(*s, *index); tUser = Gather(*s, *index);
/* check results */ /* check results */
cpuTest = t->CheckData(answer, tUnitNum) && cpuTest = CheckData(t, answer, tUnitNum) &&
tUser.CheckData(answer, tUnitNum); CheckData(&tUser, answer, tUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -112,8 +113,8 @@ bool TestGather1() ...@@ -112,8 +113,8 @@ bool TestGather1()
tUserGPU = Gather(*sGPU, *indexGPU); tUserGPU = Gather(*sGPU, *indexGPU);
/* check results */ /* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum) && gpuTest = CheckData(tGPU, answer, tUnitNum) &&
tUserGPU.CheckData(answer, tUnitNum); CheckData(&tUserGPU, answer, tUnitNum);
/* destroy variables */ /* destroy variables */
delete s; delete s;
......
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
*/ */
#include "../XTensor.h" #include "../XTensor.h"
#include "../core/utilities/CheckData.h"
#include "THardTanH.h" #include "THardTanH.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -64,7 +65,7 @@ bool TestHardTanH1() ...@@ -64,7 +65,7 @@ bool TestHardTanH1()
yUser = HardTanH(*x); yUser = HardTanH(*x);
/* check results */ /* check results */
cpuTest = y->CheckData(answer, unitNum, 1e-4F) && yUser.CheckData(answer, unitNum, 1e-4F); cpuTest = CheckData(y, answer, unitNum, 1e-4F) && CheckData(&yUser, answer, unitNum, 1e-4F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -84,7 +85,7 @@ bool TestHardTanH1() ...@@ -84,7 +85,7 @@ bool TestHardTanH1()
yUserGPU = HardTanH(*xGPU); yUserGPU = HardTanH(*xGPU);
/* check results */ /* check results */
gpuTest = yGPU->CheckData(answer, unitNum, 1e-4F) && yUserGPU.CheckData(answer, unitNum, 1e-4F); gpuTest = CheckData(yGPU, answer, unitNum, 1e-4F) && CheckData(&yUserGPU, answer, unitNum, 1e-4F);
/* destroy variables */ /* destroy variables */
delete x; delete x;
...@@ -158,8 +159,8 @@ bool TestHardTanH2() ...@@ -158,8 +159,8 @@ bool TestHardTanH2()
_HardTanHBackward(y, x, dedy, dedx); _HardTanHBackward(y, x, dedy, dedx);
/* check results */ /* check results */
cpuTest = y->CheckData(yAnswer, unitNum, 1e-4F) && cpuTest = CheckData(y, yAnswer, unitNum, 1e-4F) &&
dedx->CheckData(dedxAnswer, unitNum, 1e-4F); CheckData(dedx, dedxAnswer, unitNum, 1e-4F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -184,8 +185,8 @@ bool TestHardTanH2() ...@@ -184,8 +185,8 @@ bool TestHardTanH2()
_HardTanHBackward(yGPU, xGPU, dedyGPU, dedxGPU); _HardTanHBackward(yGPU, xGPU, dedyGPU, dedxGPU);
/* check results */ /* check results */
gpuTest = yGPU->CheckData(yAnswer, unitNum, 1e-4F) && gpuTest = CheckData(yGPU, yAnswer, unitNum, 1e-4F) &&
dedxGPU->CheckData(dedxAnswer, unitNum, 1e-4F); CheckData(dedxGPU, dedxAnswer, unitNum, 1e-4F);
/* destroy variables */ /* destroy variables */
delete x; delete x;
......
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
*/ */
#include "../XUtility.h" #include "../XUtility.h"
#include "../core/utilities/CheckData.h"
#include "TIdentity.h" #include "TIdentity.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -62,7 +63,7 @@ bool TestIdentity1() ...@@ -62,7 +63,7 @@ bool TestIdentity1()
yUser = Identity(*x); yUser = Identity(*x);
/* check result */ /* check result */
cpuTest = y->CheckData(answer, unitNum, 1e-4F) && yUser.CheckData(answer, unitNum, 1e-4F); cpuTest = CheckData(y, answer, unitNum, 1e-4F) && CheckData(&yUser, answer, unitNum, 1e-4F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -82,7 +83,7 @@ bool TestIdentity1() ...@@ -82,7 +83,7 @@ bool TestIdentity1()
yUserGPU = Identity(*xGPU); yUserGPU = Identity(*xGPU);
/* check result */ /* check result */
gpuTest = yGPU->CheckData(answer, unitNum, 1e-4F) && yUserGPU.CheckData(answer, unitNum, 1e-4F); gpuTest = CheckData(yGPU, answer, unitNum, 1e-4F) && CheckData(&yUserGPU, answer, unitNum, 1e-4F);
/* destroy variables */ /* destroy variables */
delete x; delete x;
...@@ -146,8 +147,8 @@ bool TestIdentity2() ...@@ -146,8 +147,8 @@ bool TestIdentity2()
_IdentityBackward(y, x, dedy, dedx); _IdentityBackward(y, x, dedy, dedx);
/* check result */ /* check result */
cpuTest = y->CheckData(yAnswer, unitNum, 1e-4F) && cpuTest = CheckData(y, yAnswer, unitNum, 1e-4F) &&
dedx->CheckData(dedxAnswer, unitNum, 1e-4F); CheckData(dedx, dedxAnswer, unitNum, 1e-4F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -172,8 +173,8 @@ bool TestIdentity2() ...@@ -172,8 +173,8 @@ bool TestIdentity2()
_IdentityBackward(yGPU, xGPU, dedyGPU, dedxGPU); _IdentityBackward(yGPU, xGPU, dedyGPU, dedxGPU);
/* check result */ /* check result */
gpuTest = yGPU->CheckData(yAnswer, unitNum, 1e-4F) && gpuTest = CheckData(yGPU, yAnswer, unitNum, 1e-4F) &&
dedxGPU->CheckData(dedxAnswer, unitNum, 1e-4F); CheckData(dedxGPU, dedxAnswer, unitNum, 1e-4F);
/* destroy variables */ /* destroy variables */
delete x; delete x;
......
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
*/ */
#include "../core/math/Unary.h" #include "../core/math/Unary.h"
#include "../core/utilities/CheckData.h"
#include "TLog.h" #include "TLog.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -66,7 +67,7 @@ bool TestLog1() ...@@ -66,7 +67,7 @@ bool TestLog1()
bUser = Log(*a); bUser = Log(*a);
/* check results */ /* check results */
cpuTest = b->CheckData(answer, unitNum, 1e-4F) && aMe->CheckData(answer, unitNum, 1e-4F) && bUser.CheckData(answer, unitNum, 1e-4F); cpuTest = CheckData(b, answer, unitNum, 1e-4F) && CheckData(aMe, answer, unitNum, 1e-4F) && CheckData(&bUser, answer, unitNum, 1e-4F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -88,7 +89,7 @@ bool TestLog1() ...@@ -88,7 +89,7 @@ bool TestLog1()
bUserGPU = Log(*aGPU); bUserGPU = Log(*aGPU);
/* check results */ /* check results */
gpuTest = bGPU->CheckData(answer, unitNum, 1e-4F) && aMeGPU->CheckData(answer, unitNum, 1e-4F) && bUserGPU.CheckData(answer, unitNum, 1e-4F); gpuTest = CheckData(bGPU, answer, unitNum, 1e-4F) && CheckData(aMeGPU, answer, unitNum, 1e-4F) && CheckData(&bUserGPU, answer, unitNum, 1e-4F);
/* destroy variables */ /* destroy variables */
delete a; delete a;
......
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
*/ */
#include "../XUtility.h" #include "../XUtility.h"
#include "../core/utilities/CheckData.h"
#include "TLogSoftmax.h" #include "TLogSoftmax.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -62,7 +63,7 @@ bool TestLogSoftmax1() ...@@ -62,7 +63,7 @@ bool TestLogSoftmax1()
yUser = LogSoftmax(*x, 1); yUser = LogSoftmax(*x, 1);
/* check result */ /* check result */
cpuTest = y->CheckData(answer, unitNum, 1e-4F) && yUser.CheckData(answer, unitNum, 1e-4F); cpuTest = CheckData(y, answer, unitNum, 1e-4F) && CheckData(&yUser, answer, unitNum, 1e-4F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -82,7 +83,7 @@ bool TestLogSoftmax1() ...@@ -82,7 +83,7 @@ bool TestLogSoftmax1()
yUserGPU = LogSoftmax(*xGPU, 1); yUserGPU = LogSoftmax(*xGPU, 1);
/* check result */ /* check result */
gpuTest = yGPU->CheckData(answer, unitNum, 1e-4F) && yUserGPU.CheckData(answer, unitNum, 1e-4F); gpuTest = CheckData(yGPU, answer, unitNum, 1e-4F) && CheckData(&yUserGPU, answer, unitNum, 1e-4F);
/* destroy variables */ /* destroy variables */
delete x; delete x;
...@@ -149,8 +150,8 @@ bool TestLogSoftmax2() ...@@ -149,8 +150,8 @@ bool TestLogSoftmax2()
_LogSoftmaxBackward(g, y, x, dedy, dedx, NULL, 1, CROSSENTROPY); _LogSoftmaxBackward(g, y, x, dedy, dedx, NULL, 1, CROSSENTROPY);
/* check result */ /* check result */
cpuTest = y->CheckData(yAnswer, unitNum, 1e-4F) cpuTest = CheckData(y, yAnswer, unitNum, 1e-4F)
&& dedx->CheckData(dedxAnswer, unitNum, 1e-4F); && CheckData(dedx, dedxAnswer, unitNum, 1e-4F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -177,7 +178,7 @@ bool TestLogSoftmax2() ...@@ -177,7 +178,7 @@ bool TestLogSoftmax2()
_LogSoftmaxBackward(gGPU, yGPU, xGPU, dedyGPU, dedxGPU, NULL, 1, CROSSENTROPY); _LogSoftmaxBackward(gGPU, yGPU, xGPU, dedyGPU, dedxGPU, NULL, 1, CROSSENTROPY);
/* check result */ /* check result */
gpuTest = yGPU->CheckData(yAnswer, unitNum, 1e-4F) && dedxGPU->CheckData(dedxAnswer, unitNum, 1e-4F); gpuTest = CheckData(yGPU, yAnswer, unitNum, 1e-4F) && CheckData(dedxGPU, dedxAnswer, unitNum, 1e-4F);
/* destroy variables */ /* destroy variables */
delete x; delete x;
...@@ -253,8 +254,8 @@ bool TestLogSoftmax3() ...@@ -253,8 +254,8 @@ bool TestLogSoftmax3()
_LogSoftmaxBackward(g, y, x, dedy, dedx, NULL, 1, SQUAREDERROR); _LogSoftmaxBackward(g, y, x, dedy, dedx, NULL, 1, SQUAREDERROR);
/* check result */ /* check result */
cpuTest = y->CheckData(yAnswer, unitNum, 1e-4F) cpuTest = CheckData(y, yAnswer, unitNum, 1e-4F)
&& dedx->CheckData(dedxAnswer, unitNum, 1e-4F); && CheckData(dedx, dedxAnswer, unitNum, 1e-4F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -281,8 +282,8 @@ bool TestLogSoftmax3() ...@@ -281,8 +282,8 @@ bool TestLogSoftmax3()
_LogSoftmaxBackward(gGPU, yGPU, xGPU, dedyGPU, dedxGPU, NULL, 1, SQUAREDERROR); _LogSoftmaxBackward(gGPU, yGPU, xGPU, dedyGPU, dedxGPU, NULL, 1, SQUAREDERROR);
/* check result */ /* check result */
gpuTest = yGPU->CheckData(yAnswer, unitNum, 1e-4F) gpuTest = CheckData(yGPU, yAnswer, unitNum, 1e-4F)
&& dedxGPU->CheckData(dedxAnswer, unitNum, 1e-3F); && CheckData(dedxGPU, dedxAnswer, unitNum, 1e-3F);
/* destroy variables */ /* destroy variables */
delete x; delete x;
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-14 * $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-14
*/ */
#include "../core/utilities/CheckData.h"
#include "TMatrixMul.h" #include "TMatrixMul.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -87,7 +88,7 @@ bool TestMatrixMul1() ...@@ -87,7 +88,7 @@ bool TestMatrixMul1()
tUser = MatrixMul(*s1, X_NOTRANS, *s2, X_NOTRANS); tUser = MatrixMul(*s1, X_NOTRANS, *s2, X_NOTRANS);
/* check results */ /* check results */
cpuTest = t->CheckData(answer, tUnitNum) && tUser.CheckData(answer, tUnitNum); cpuTest = CheckData(t, answer, tUnitNum) && CheckData(&tUser, answer, tUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -109,7 +110,7 @@ bool TestMatrixMul1() ...@@ -109,7 +110,7 @@ bool TestMatrixMul1()
tUserGPU = MatrixMul(*sGPU1, X_NOTRANS, *sGPU2, X_NOTRANS); tUserGPU = MatrixMul(*sGPU1, X_NOTRANS, *sGPU2, X_NOTRANS);
/* check results */ /* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum) && tUserGPU.CheckData(answer, tUnitNum); gpuTest = CheckData(tGPU, answer, tUnitNum) && CheckData(&tUserGPU, answer, tUnitNum);
/* destroy variables */ /* destroy variables */
delete s1; delete s1;
...@@ -201,7 +202,7 @@ bool TestMatrixMul2() ...@@ -201,7 +202,7 @@ bool TestMatrixMul2()
tUser = MatrixMul(*s1, X_TRANS, *s2, X_NOTRANS); tUser = MatrixMul(*s1, X_TRANS, *s2, X_NOTRANS);
/* check results */ /* check results */
cpuTest = t->CheckData(answer, tUnitNum) && tUser.CheckData(answer, tUnitNum); cpuTest = CheckData(t, answer, tUnitNum) && CheckData(&tUser, answer, tUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -223,7 +224,7 @@ bool TestMatrixMul2() ...@@ -223,7 +224,7 @@ bool TestMatrixMul2()
tUserGPU = MatrixMul(*sGPU1, X_TRANS, *sGPU2, X_NOTRANS); tUserGPU = MatrixMul(*sGPU1, X_TRANS, *sGPU2, X_NOTRANS);
/* check results */ /* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum) && tUserGPU.CheckData(answer, tUnitNum); gpuTest = CheckData(tGPU, answer, tUnitNum) && CheckData(&tUserGPU, answer, tUnitNum);
/* destroy variables */ /* destroy variables */
delete s1; delete s1;
...@@ -335,7 +336,7 @@ bool TestMatrixMul3() ...@@ -335,7 +336,7 @@ bool TestMatrixMul3()
tUser = MatrixMul(*s1, X_NOTRANS, *s2, X_NOTRANS); tUser = MatrixMul(*s1, X_NOTRANS, *s2, X_NOTRANS);
/* check results */ /* check results */
cpuTest = t->CheckData(answer, tUnitNum) && tUser.CheckData(answer, tUnitNum); cpuTest = CheckData(t, answer, tUnitNum) && CheckData(&tUser, answer, tUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -357,7 +358,7 @@ bool TestMatrixMul3() ...@@ -357,7 +358,7 @@ bool TestMatrixMul3()
tUserGPU = MatrixMul(*sGPU1, X_NOTRANS, *sGPU2, X_NOTRANS); tUserGPU = MatrixMul(*sGPU1, X_NOTRANS, *sGPU2, X_NOTRANS);
/* check results */ /* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum) && tUserGPU.CheckData(answer, tUnitNum); gpuTest = CheckData(tGPU, answer, tUnitNum) && CheckData(&tUserGPU, answer, tUnitNum);
/* destroy variables */ /* destroy variables */
delete s1; delete s1;
...@@ -458,7 +459,7 @@ bool TestMatrixMul4() ...@@ -458,7 +459,7 @@ bool TestMatrixMul4()
tUser = MatrixMul(*s1, X_NOTRANS, *s2, X_NOTRANS); tUser = MatrixMul(*s1, X_NOTRANS, *s2, X_NOTRANS);
/* check results */ /* check results */
cpuTest = t->CheckData(answer, tUnitNum) && tUser.CheckData(answer, tUnitNum); cpuTest = CheckData(t, answer, tUnitNum) && CheckData(&tUser, answer, tUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -480,7 +481,7 @@ bool TestMatrixMul4() ...@@ -480,7 +481,7 @@ bool TestMatrixMul4()
tUserGPU = MatrixMul(*sGPU1, X_NOTRANS, *sGPU2, X_NOTRANS); tUserGPU = MatrixMul(*sGPU1, X_NOTRANS, *sGPU2, X_NOTRANS);
/* check results */ /* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum) && tUserGPU.CheckData(answer, tUnitNum); gpuTest = CheckData(tGPU, answer, tUnitNum) && CheckData(&tUserGPU, answer, tUnitNum);
/* destroy variables */ /* destroy variables */
delete s1; delete s1;
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-15 * $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-15
*/ */
#include "../core/utilities/CheckData.h"
#include "TMatrixMul2D.h" #include "TMatrixMul2D.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -85,7 +86,7 @@ bool TestMatrixMul2D1() ...@@ -85,7 +86,7 @@ bool TestMatrixMul2D1()
_MatrixMul2D(s1, X_NOTRANS, s2, X_NOTRANS, t); _MatrixMul2D(s1, X_NOTRANS, s2, X_NOTRANS, t);
/* check results */ /* check results */
cpuTest = t->CheckData(answer, tUnitNum); cpuTest = CheckData(t, answer, tUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -105,7 +106,7 @@ bool TestMatrixMul2D1() ...@@ -105,7 +106,7 @@ bool TestMatrixMul2D1()
_MatrixMul2D(sGPU1, X_NOTRANS, sGPU2, X_NOTRANS, tGPU); _MatrixMul2D(sGPU1, X_NOTRANS, sGPU2, X_NOTRANS, tGPU);
/* check results */ /* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum); gpuTest = CheckData(tGPU, answer, tUnitNum);
/* destroy variables */ /* destroy variables */
delete s1; delete s1;
...@@ -195,7 +196,7 @@ bool TestMatrixMul2D2() ...@@ -195,7 +196,7 @@ bool TestMatrixMul2D2()
_MatrixMul2D(s1, X_TRANS, s2, X_NOTRANS, t); _MatrixMul2D(s1, X_TRANS, s2, X_NOTRANS, t);
/* check results */ /* check results */
cpuTest = t->CheckData(answer, tUnitNum); cpuTest = CheckData(t, answer, tUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -215,7 +216,7 @@ bool TestMatrixMul2D2() ...@@ -215,7 +216,7 @@ bool TestMatrixMul2D2()
_MatrixMul2D(sGPU1, X_TRANS, sGPU2, X_NOTRANS, tGPU); _MatrixMul2D(sGPU1, X_TRANS, sGPU2, X_NOTRANS, tGPU);
/* check results */ /* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum); gpuTest = CheckData(tGPU, answer, tUnitNum);
/* destroy variables */ /* destroy variables */
delete s1; delete s1;
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-07-06 * $Created by: Xu Chen (email: hello_master1954@163.com) 2018-07-06
*/ */
#include "../core/utilities/CheckData.h"
#include "TMatrixMul2DParallel.h" #include "TMatrixMul2DParallel.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -85,7 +86,7 @@ bool TestMatrixMul2DParallel1() ...@@ -85,7 +86,7 @@ bool TestMatrixMul2DParallel1()
_MatrixMul2DParallel(s1, X_NOTRANS, s2, X_NOTRANS, t); _MatrixMul2DParallel(s1, X_NOTRANS, s2, X_NOTRANS, t);
/* check results */ /* check results */
cpuTest = t->CheckData(answer, tUnitNum); cpuTest = CheckData(t, answer, tUnitNum);
/* destroy variables */ /* destroy variables */
delete s1; delete s1;
...@@ -161,7 +162,7 @@ bool TestMatrixMul2DParallel2() ...@@ -161,7 +162,7 @@ bool TestMatrixMul2DParallel2()
_MatrixMul2DParallel(s1, X_TRANS, s2, X_NOTRANS, t); _MatrixMul2DParallel(s1, X_TRANS, s2, X_NOTRANS, t);
/* check results */ /* check results */
cpuTest = t->CheckData(answer, tUnitNum); cpuTest = CheckData(t, answer, tUnitNum);
/* destroy variables */ /* destroy variables */
delete s1; delete s1;
......
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
*/ */
#include "../XTensor.h" #include "../XTensor.h"
#include "../core/utilities/CheckData.h"
#include "TMatrixMulBatched.h" #include "TMatrixMulBatched.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -87,7 +88,7 @@ bool TestMatrixMulBatched1() ...@@ -87,7 +88,7 @@ bool TestMatrixMulBatched1()
tUser = MatrixMulBatched(*s1, X_NOTRANS, *s2, X_NOTRANS); tUser = MatrixMulBatched(*s1, X_NOTRANS, *s2, X_NOTRANS);
/* check results */ /* check results */
cpuTest = t->CheckData(answer, tUnitNum) && tUser.CheckData(answer, tUnitNum); cpuTest = CheckData(t, answer, tUnitNum) && CheckData(&tUser, answer, tUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -109,7 +110,7 @@ bool TestMatrixMulBatched1() ...@@ -109,7 +110,7 @@ bool TestMatrixMulBatched1()
tUserGPU = MatrixMulBatched(*sGPU1, X_NOTRANS, *sGPU2, X_NOTRANS); tUserGPU = MatrixMulBatched(*sGPU1, X_NOTRANS, *sGPU2, X_NOTRANS);
/* check results */ /* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum) && tUserGPU.CheckData(answer, tUnitNum); gpuTest = CheckData(tGPU, answer, tUnitNum) && CheckData(&tUserGPU, answer, tUnitNum);
/* destroy variables */ /* destroy variables */
delete s1; delete s1;
...@@ -209,7 +210,7 @@ bool TestMatrixMulBatched2() ...@@ -209,7 +210,7 @@ bool TestMatrixMulBatched2()
tUser = MatrixMulBatched(*s1, X_NOTRANS, *s2, X_NOTRANS); tUser = MatrixMulBatched(*s1, X_NOTRANS, *s2, X_NOTRANS);
/* check results */ /* check results */
cpuTest = t->CheckData(answer, tUnitNum) && tUser.CheckData(answer, tUnitNum); cpuTest = CheckData(t, answer, tUnitNum) && CheckData(&tUser, answer, tUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -231,7 +232,7 @@ bool TestMatrixMulBatched2() ...@@ -231,7 +232,7 @@ bool TestMatrixMulBatched2()
tUserGPU = MatrixMulBatched(*sGPU1, X_NOTRANS, *sGPU2, X_NOTRANS); tUserGPU = MatrixMulBatched(*sGPU1, X_NOTRANS, *sGPU2, X_NOTRANS);
/* check results */ /* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum) && tUserGPU.CheckData(answer, tUnitNum); gpuTest = CheckData(tGPU, answer, tUnitNum) && CheckData(&tUserGPU, answer, tUnitNum);
/* destroy variables */ /* destroy variables */
delete s1; delete s1;
......
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include "../XTensor.h" #include "../XTensor.h"
#include "../XList.h" #include "../XList.h"
#include "../core/utilities/CheckData.h"
#include "TMerge.h" #include "TMerge.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -71,7 +72,7 @@ bool TestMerge1() ...@@ -71,7 +72,7 @@ bool TestMerge1()
tUser = Merge(*s, 1, 0); tUser = Merge(*s, 1, 0);
/* check results */ /* check results */
cpuTest = t->CheckData(answer, tUnitNum) && tUser.CheckData(answer, tUnitNum); cpuTest = CheckData(t, answer, tUnitNum) && CheckData(&tUser, answer, tUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -91,7 +92,7 @@ bool TestMerge1() ...@@ -91,7 +92,7 @@ bool TestMerge1()
tUserGPU = Merge(*sGPU, 1, 0); tUserGPU = Merge(*sGPU, 1, 0);
/* check results */ /* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum) && tUserGPU.CheckData(answer, tUnitNum); gpuTest = CheckData(tGPU, answer, tUnitNum) && CheckData(&tUserGPU, answer, tUnitNum);
/* destroy variables */ /* destroy variables */
delete s; delete s;
...@@ -185,8 +186,8 @@ bool TestMerge2() ...@@ -185,8 +186,8 @@ bool TestMerge2()
tUser2 = Merge(*s, 2, 0); tUser2 = Merge(*s, 2, 0);
/* check results */ /* check results */
cpuTest = t1->CheckData(answer1, tUnitNum1) && tUser1.CheckData(answer1, tUnitNum1) cpuTest = CheckData(t1, answer1, tUnitNum1) && CheckData(&tUser1, answer1, tUnitNum1)
&& t2->CheckData(answer2, tUnitNum2) && tUser2.CheckData(answer2, tUnitNum2); && CheckData(t2, answer2, tUnitNum2) && CheckData(&tUser2, answer2, tUnitNum2);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -211,8 +212,8 @@ bool TestMerge2() ...@@ -211,8 +212,8 @@ bool TestMerge2()
tUserGPU2 = Merge(*sGPU, 2, 0); tUserGPU2 = Merge(*sGPU, 2, 0);
/* check results */ /* check results */
gpuTest = tGPU1->CheckData(answer1, tUnitNum1) && tUserGPU1.CheckData(answer1, tUnitNum1) gpuTest = CheckData(tGPU1, answer1, tUnitNum1) && CheckData(&tUserGPU1, answer1, tUnitNum1)
&& tGPU2->CheckData(answer2, tUnitNum2) && tUserGPU2.CheckData(answer2, tUnitNum2); && CheckData(tGPU2, answer2, tUnitNum2) && CheckData(&tUserGPU2, answer2, tUnitNum2);
/* destroy variables */ /* destroy variables */
delete s; delete s;
...@@ -301,7 +302,7 @@ bool TestMerge3() ...@@ -301,7 +302,7 @@ bool TestMerge3()
tUser = Merge(*smallList, 0); tUser = Merge(*smallList, 0);
/* check results */ /* check results */
cpuTest = t->CheckData(answer, tUnitNum) && tUser.CheckData(answer, tUnitNum); cpuTest = CheckData(t, answer, tUnitNum) && CheckData(&tUser, answer, tUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -330,7 +331,7 @@ bool TestMerge3() ...@@ -330,7 +331,7 @@ bool TestMerge3()
tUserGPU = Merge(*smallList, 0); tUserGPU = Merge(*smallList, 0);
/* check results */ /* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum) && tUserGPU.CheckData(answer, tUnitNum); gpuTest = CheckData(tGPU, answer, tUnitNum) && CheckData(&tUserGPU, answer, tUnitNum);
/* destroy variables */ /* destroy variables */
delete s1; delete s1;
...@@ -417,7 +418,7 @@ bool TestMerge4() ...@@ -417,7 +418,7 @@ bool TestMerge4()
tUser = Merge(*smallList, 1); tUser = Merge(*smallList, 1);
/* check results */ /* check results */
cpuTest = t->CheckData(answer, tUnitNum) && tUser.CheckData(answer, tUnitNum); cpuTest = CheckData(t, answer, tUnitNum) && CheckData(&tUser, answer, tUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -446,7 +447,7 @@ bool TestMerge4() ...@@ -446,7 +447,7 @@ bool TestMerge4()
tUserGPU = Merge(*smallList, 1); tUserGPU = Merge(*smallList, 1);
/* check results */ /* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum) && tUserGPU.CheckData(answer, tUnitNum); gpuTest = CheckData(tGPU, answer, tUnitNum) && CheckData(&tUserGPU, answer, tUnitNum);
/* destroy variables */ /* destroy variables */
delete s1; delete s1;
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
* $Created by: Lin Ye (email: linye2015@outlook.com) 2018-06-15 * $Created by: Lin Ye (email: linye2015@outlook.com) 2018-06-15
*/ */
#include "../core/utilities/CheckData.h"
#include "TMultiply.h" #include "TMultiply.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -89,9 +90,9 @@ bool TestMultiply1() ...@@ -89,9 +90,9 @@ bool TestMultiply1()
tUser = Multiply(*s1, *s2, 0); tUser = Multiply(*s1, *s2, 0);
/* check results */ /* check results */
cpuTest = t->CheckData(answer, tUnitNum) && cpuTest = CheckData(t, answer, tUnitNum) &&
tMe->CheckData(answer, tUnitNum) && CheckData(tMe, answer, tUnitNum) &&
tUser.CheckData(answer, tUnitNum); CheckData(&tUser, answer, tUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -116,9 +117,9 @@ bool TestMultiply1() ...@@ -116,9 +117,9 @@ bool TestMultiply1()
tUserGPU = Multiply(*sGPU1, *sGPU2, 0); tUserGPU = Multiply(*sGPU1, *sGPU2, 0);
/* check results */ /* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum) && gpuTest = CheckData(tGPU, answer, tUnitNum) &&
tMeGPU->CheckData(answer, tUnitNum) && CheckData(tMeGPU, answer, tUnitNum) &&
tUserGPU.CheckData(answer, tUnitNum); CheckData(&tUserGPU, answer, tUnitNum);
/* destroy variables */ /* destroy variables */
delete s1; delete s1;
......
...@@ -19,9 +19,10 @@ ...@@ -19,9 +19,10 @@
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-07-30 * $Created by: Xu Chen (email: hello_master1954@163.com) 2018-07-30
*/ */
#include "TMultiplyDim.h"
#include "../core/arithmetic/MultiplyDim.h" #include "../core/arithmetic/MultiplyDim.h"
#include "../core/utilities/CheckData.h"
#include "../XTensor.h" #include "../XTensor.h"
#include "TMultiplyDim.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
/* /*
...@@ -79,9 +80,9 @@ bool TestMultiplyDim1() ...@@ -79,9 +80,9 @@ bool TestMultiplyDim1()
cUser = MultiplyDim(*a, *b, 0); cUser = MultiplyDim(*a, *b, 0);
/* check results */ /* check results */
cpuTest = c->CheckData(answer, aUnitNum) && cpuTest = CheckData(c, answer, aUnitNum) &&
cMe->CheckData(answer, aUnitNum) && CheckData(cMe, answer, aUnitNum) &&
cUser.CheckData(answer, aUnitNum); CheckData(&cUser, answer, aUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -106,9 +107,9 @@ bool TestMultiplyDim1() ...@@ -106,9 +107,9 @@ bool TestMultiplyDim1()
cUserGPU = MultiplyDim(*aGPU, *bGPU, 0); cUserGPU = MultiplyDim(*aGPU, *bGPU, 0);
/* check results */ /* check results */
gpuTest = cGPU->CheckData(answer, aUnitNum) && gpuTest = CheckData(cGPU, answer, aUnitNum) &&
cMeGPU->CheckData(answer, aUnitNum) && CheckData(cMeGPU, answer, aUnitNum) &&
cUserGPU.CheckData(answer, aUnitNum); CheckData(&cUserGPU, answer, aUnitNum);
/* destroy variables */ /* destroy variables */
delete a; delete a;
...@@ -191,9 +192,9 @@ bool TestMultiplyDim2() ...@@ -191,9 +192,9 @@ bool TestMultiplyDim2()
cUser = MultiplyDim(*a, *b, 1); cUser = MultiplyDim(*a, *b, 1);
/* check results */ /* check results */
cpuTest = c->CheckData(answer, aUnitNum) && cpuTest = CheckData(c, answer, aUnitNum) &&
cMe->CheckData(answer, aUnitNum) && CheckData(cMe, answer, aUnitNum) &&
cUser.CheckData(answer, aUnitNum); CheckData(&cUser, answer, aUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -218,9 +219,9 @@ bool TestMultiplyDim2() ...@@ -218,9 +219,9 @@ bool TestMultiplyDim2()
cUserGPU = MultiplyDim(*aGPU, *bGPU, 1); cUserGPU = MultiplyDim(*aGPU, *bGPU, 1);
/* check results */ /* check results */
gpuTest = cGPU->CheckData(answer, aUnitNum) && gpuTest = CheckData(cGPU, answer, aUnitNum) &&
cMeGPU->CheckData(answer, aUnitNum) && CheckData(cMeGPU, answer, aUnitNum) &&
cUserGPU.CheckData(answer, aUnitNum); CheckData(&cUserGPU, answer, aUnitNum);
/* destroy variables */ /* destroy variables */
delete a; delete a;
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
* $Created by: Lin Ye (email: linye2015@outlook.com) 2018-06-14 * $Created by: Lin Ye (email: linye2015@outlook.com) 2018-06-14
*/ */
#include "../core/utilities/CheckData.h"
#include "TNegate.h" #include "TNegate.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -62,7 +63,7 @@ bool TestNegate1() ...@@ -62,7 +63,7 @@ bool TestNegate1()
bUser = Negate(*a); bUser = Negate(*a);
/* check results */ /* check results */
cpuTest = b->CheckData(answer, aUnitNum, 1e-4F) && aMe->CheckData(answer, aUnitNum, 1e-4F) && bUser.CheckData(answer, aUnitNum, 1e-4F); cpuTest = CheckData(b, answer, aUnitNum, 1e-4F) && CheckData(aMe, answer, aUnitNum, 1e-4F) && CheckData(&bUser, answer, aUnitNum, 1e-4F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -84,7 +85,7 @@ bool TestNegate1() ...@@ -84,7 +85,7 @@ bool TestNegate1()
bUserGPU = Negate(*aGPU); bUserGPU = Negate(*aGPU);
/* check results */ /* check results */
gpuTest = bGPU->CheckData(answer, aUnitNum, 1e-4F) && aMeGPU->CheckData(answer, aUnitNum, 1e-4F) && bUserGPU.CheckData(answer, aUnitNum, 1e-4F); gpuTest = CheckData(bGPU, answer, aUnitNum, 1e-4F) && CheckData(aMeGPU, answer, aUnitNum, 1e-4F) && CheckData(&bUserGPU, answer, aUnitNum, 1e-4F);
/* destroy variables */ /* destroy variables */
delete a; delete a;
...@@ -146,7 +147,7 @@ bool TestNegate2() ...@@ -146,7 +147,7 @@ bool TestNegate2()
bUser = Negate(*a); bUser = Negate(*a);
/* check results */ /* check results */
cpuTest = b->CheckData(answer, aUnitNum, 1e-4F) && aMe->CheckData(answer, aUnitNum, 1e-4F) && bUser.CheckData(answer, aUnitNum, 1e-4F); cpuTest = CheckData(b, answer, aUnitNum, 1e-4F) && CheckData(aMe, answer, aUnitNum, 1e-4F) && CheckData(&bUser, answer, aUnitNum, 1e-4F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -168,7 +169,7 @@ bool TestNegate2() ...@@ -168,7 +169,7 @@ bool TestNegate2()
bUserGPU = Negate(*aGPU); bUserGPU = Negate(*aGPU);
/* check results */ /* check results */
gpuTest = bGPU->CheckData(answer, aUnitNum, 1e-4F) && aMeGPU->CheckData(answer, aUnitNum, 1e-4F) && bUserGPU.CheckData(answer, aUnitNum, 1e-4F); gpuTest = CheckData(bGPU, answer, aUnitNum, 1e-4F) && CheckData(aMeGPU, answer, aUnitNum, 1e-4F) && CheckData(&bUserGPU, answer, aUnitNum, 1e-4F);
/* destroy variables */ /* destroy variables */
delete a; delete a;
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
* $Created by: Lin Ye (email: linye2015@outlook.com) 2018-06-20 * $Created by: Lin Ye (email: linye2015@outlook.com) 2018-06-20
*/ */
#include "../core/utilities/CheckData.h"
#include "TNormalize.h" #include "TNormalize.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -126,8 +127,8 @@ bool TestNormalize1() ...@@ -126,8 +127,8 @@ bool TestNormalize1()
tUser = Normalize(*s, 0, *mean, *var, *a, *b, 0.0F); tUser = Normalize(*s, 0, *mean, *var, *a, *b, 0.0F);
/* check results */ /* check results */
cpuTest = t->CheckData(answer, tUnitNum, 1e-4F) cpuTest = CheckData(t, answer, tUnitNum, 1e-4F)
&& tMe->CheckData(answer, tUnitNum, 1e-4F) && tUser.CheckData(answer, tUnitNum, 1e-4F); && CheckData(tMe, answer, tUnitNum, 1e-4F) && CheckData(&tUser, answer, tUnitNum, 1e-4F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -158,8 +159,8 @@ bool TestNormalize1() ...@@ -158,8 +159,8 @@ bool TestNormalize1()
tUserGPU = Normalize(*sGPU, 0, *meanGPU, *varGPU, *aGPU, *bGPU, 0.0F); tUserGPU = Normalize(*sGPU, 0, *meanGPU, *varGPU, *aGPU, *bGPU, 0.0F);
/* check results */ /* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum, 1e-4F) gpuTest = CheckData(tGPU, answer, tUnitNum, 1e-4F)
&& tMeGPU->CheckData(answer, tUnitNum, 1e-4F) && tUserGPU.CheckData(answer, tUnitNum, 1e-4F); && CheckData(tMeGPU, answer, tUnitNum, 1e-4F) && CheckData(&tUserGPU, answer, tUnitNum, 1e-4F);
/* destroy variables */ /* destroy variables */
delete s; delete s;
......
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
*/ */
#include "../core/math/Binary.h" #include "../core/math/Binary.h"
#include "../core/utilities/CheckData.h"
#include "../XUtility.h" #include "../XUtility.h"
#include "TPower.h" #include "TPower.h"
...@@ -67,9 +68,9 @@ bool TestPower1() ...@@ -67,9 +68,9 @@ bool TestPower1()
bUser = Power(*a, 2.0F); bUser = Power(*a, 2.0F);
/* check results */ /* check results */
cpuTest = b->CheckData(answer, aUnitNum, 1e-4F) && cpuTest = CheckData(b, answer, aUnitNum, 1e-4F) &&
aMe->CheckData(answer, aUnitNum, 1e-4F) && CheckData(aMe, answer, aUnitNum, 1e-4F) &&
bUser.CheckData(answer, aUnitNum, 1e-4F); CheckData(&bUser, answer, aUnitNum, 1e-4F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -91,9 +92,9 @@ bool TestPower1() ...@@ -91,9 +92,9 @@ bool TestPower1()
bUserGPU = Power(*aGPU, 2.0F); bUserGPU = Power(*aGPU, 2.0F);
/* check results */ /* check results */
gpuTest = bGPU->CheckData(answer, aUnitNum, 1e-4F) && gpuTest = CheckData(bGPU, answer, aUnitNum, 1e-4F) &&
aMeGPU->CheckData(answer, aUnitNum, 1e-4F) && CheckData(aMeGPU, answer, aUnitNum, 1e-4F) &&
bUserGPU.CheckData(answer, aUnitNum, 1e-4F); CheckData(&bUserGPU, answer, aUnitNum, 1e-4F);
/* destroy variables */ /* destroy variables */
delete a; delete a;
...@@ -158,9 +159,9 @@ bool TestPower2() ...@@ -158,9 +159,9 @@ bool TestPower2()
bUser = Power(*a, 1.0F); bUser = Power(*a, 1.0F);
/* check results */ /* check results */
cpuTest = b->CheckData(answer, aUnitNum, 1e-4F) && cpuTest = CheckData(b, answer, aUnitNum, 1e-4F) &&
aMe->CheckData(answer, aUnitNum, 1e-4F) && CheckData(aMe, answer, aUnitNum, 1e-4F) &&
bUser.CheckData(answer, aUnitNum, 1e-4F); CheckData(&bUser, answer, aUnitNum, 1e-4F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -182,9 +183,9 @@ bool TestPower2() ...@@ -182,9 +183,9 @@ bool TestPower2()
bUserGPU = Power(*aGPU, 1.0F); bUserGPU = Power(*aGPU, 1.0F);
/* check results */ /* check results */
gpuTest = bGPU->CheckData(answer, aUnitNum, 1e-4F) && gpuTest = CheckData(bGPU, answer, aUnitNum, 1e-4F) &&
aMeGPU->CheckData(answer, aUnitNum, 1e-4F) && CheckData(aMeGPU, answer, aUnitNum, 1e-4F) &&
bUserGPU.CheckData(answer, aUnitNum, 1e-4F); CheckData(&bUserGPU, answer, aUnitNum, 1e-4F);
/* destroy variables */ /* destroy variables */
delete a; delete a;
...@@ -249,9 +250,9 @@ bool TestPower3() ...@@ -249,9 +250,9 @@ bool TestPower3()
bUser = Power(*a, 0.0F); bUser = Power(*a, 0.0F);
/* check results */ /* check results */
cpuTest = b->CheckData(answer, aUnitNum, 1e-4F) && cpuTest = CheckData(b, answer, aUnitNum, 1e-4F) &&
aMe->CheckData(answer, aUnitNum, 1e-4F) && CheckData(aMe, answer, aUnitNum, 1e-4F) &&
bUser.CheckData(answer, aUnitNum, 1e-4F); CheckData(&bUser, answer, aUnitNum, 1e-4F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -273,9 +274,9 @@ bool TestPower3() ...@@ -273,9 +274,9 @@ bool TestPower3()
bUserGPU = Power(*aGPU, 0.0F); bUserGPU = Power(*aGPU, 0.0F);
/* check results */ /* check results */
gpuTest = bGPU->CheckData(answer, aUnitNum, 1e-4F) && gpuTest = CheckData(bGPU, answer, aUnitNum, 1e-4F) &&
aMeGPU->CheckData(answer, aUnitNum, 1e-4F) && CheckData(aMeGPU, answer, aUnitNum, 1e-4F) &&
bUserGPU.CheckData(answer, aUnitNum, 1e-4F); CheckData(&bUserGPU, answer, aUnitNum, 1e-4F);
/* destroy variables */ /* destroy variables */
delete a; delete a;
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
* $Created by: Lin Ye (email: linye2015@outlook.com) 2018-06-14 * $Created by: Lin Ye (email: linye2015@outlook.com) 2018-06-14
*/ */
#include "../core/utilities/CheckData.h"
#include "TRectify.h" #include "TRectify.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -61,7 +62,7 @@ bool TestRectify1() ...@@ -61,7 +62,7 @@ bool TestRectify1()
yUser = Rectify(*x); yUser = Rectify(*x);
/* check results */ /* check results */
cpuTest = y->CheckData(answer, unitNum, 1e-4F) && yUser.CheckData(answer, unitNum, 1e-4F); cpuTest = CheckData(y, answer, unitNum, 1e-4F) && CheckData(&yUser, answer, unitNum, 1e-4F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -81,7 +82,7 @@ bool TestRectify1() ...@@ -81,7 +82,7 @@ bool TestRectify1()
yUserGPU = Rectify(*xGPU); yUserGPU = Rectify(*xGPU);
/* check results */ /* check results */
gpuTest = yGPU->CheckData(answer, unitNum, 1e-4F) && yUserGPU.CheckData(answer, unitNum, 1e-4F); gpuTest = CheckData(yGPU, answer, unitNum, 1e-4F) && CheckData(&yUserGPU, answer, unitNum, 1e-4F);
/* destroy variables */ /* destroy variables */
delete x; delete x;
...@@ -149,7 +150,7 @@ bool TestRectify2() ...@@ -149,7 +150,7 @@ bool TestRectify2()
_RectifyBackward(y, x, dedy, dedx); _RectifyBackward(y, x, dedy, dedx);
/* check results */ /* check results */
cpuTest = dedx->CheckData(dedxAnswer, unitNum, 1e-4F); cpuTest = CheckData(dedx, dedxAnswer, unitNum, 1e-4F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -173,7 +174,7 @@ bool TestRectify2() ...@@ -173,7 +174,7 @@ bool TestRectify2()
_RectifyBackward(yGPU, xGPU, dedyGPU, dedxGPU); _RectifyBackward(yGPU, xGPU, dedyGPU, dedxGPU);
/* check results */ /* check results */
gpuTest = dedxGPU->CheckData(dedxAnswer, unitNum, 1e-4F); gpuTest = CheckData(dedxGPU, dedxAnswer, unitNum, 1e-4F);
/* destroy variables */ /* destroy variables */
delete x; delete x;
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-30 * $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-30
*/ */
#include "../core/utilities/CheckData.h"
#include "TReduceMax.h" #include "TReduceMax.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -86,8 +87,8 @@ bool TestReduceMax1() ...@@ -86,8 +87,8 @@ bool TestReduceMax1()
tUser2 = ReduceMax(*s, 1); tUser2 = ReduceMax(*s, 1);
/* check results */ /* check results */
cpuTest = t1->CheckData(answer1, tUnitNum1) && tUser1.CheckData(answer1, tUnitNum1) cpuTest = CheckData(t1, answer1, tUnitNum1) && CheckData(&tUser1, answer1, tUnitNum1)
&& t2->CheckData(answer2, tUnitNum2) && tUser2.CheckData(answer2, tUnitNum2); && CheckData(t2, answer2, tUnitNum2) && CheckData(&tUser2, answer2, tUnitNum2);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -112,8 +113,8 @@ bool TestReduceMax1() ...@@ -112,8 +113,8 @@ bool TestReduceMax1()
tUserGPU2 = ReduceMax(*sGPU, 1); tUserGPU2 = ReduceMax(*sGPU, 1);
/* check results */ /* check results */
gpuTest = tGPU1->CheckData(answer1, tUnitNum1) && tUserGPU1.CheckData(answer1, tUnitNum1) gpuTest = CheckData(tGPU1, answer1, tUnitNum1) && CheckData(&tUserGPU1, answer1, tUnitNum1)
&& tGPU2->CheckData(answer2, tUnitNum2) && tUserGPU2.CheckData(answer2, tUnitNum2); && CheckData(tGPU2, answer2, tUnitNum2) && CheckData(&tUserGPU2, answer2, tUnitNum2);
/* destroy variables */ /* destroy variables */
delete s; delete s;
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
* $Created by: LI Yinqiao (email: li.yin.qiao.2012@hotmail.com) 2018-04-30 * $Created by: LI Yinqiao (email: li.yin.qiao.2012@hotmail.com) 2018-04-30
*/ */
#include "../core/utilities/CheckData.h"
#include "TReduceMean.h" #include "TReduceMean.h"
namespace nts { // namespace nt(NiuTrans.Tensor) namespace nts { // namespace nt(NiuTrans.Tensor)
...@@ -81,8 +82,8 @@ bool TestReduceMean1() ...@@ -81,8 +82,8 @@ bool TestReduceMean1()
tUser2 = ReduceMean(*s, 1); tUser2 = ReduceMean(*s, 1);
/* check results */ /* check results */
cpuTest = t1->CheckData(answer1, tUnitNum1) && tUser1.CheckData(answer1, tUnitNum1) cpuTest = CheckData(t1, answer1, tUnitNum1) && CheckData(&tUser1, answer1, tUnitNum1)
&& t2->CheckData(answer2, tUnitNum2) && tUser2.CheckData(answer2, tUnitNum2); && CheckData(t2, answer2, tUnitNum2) && CheckData(&tUser2, answer2, tUnitNum2);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -107,8 +108,8 @@ bool TestReduceMean1() ...@@ -107,8 +108,8 @@ bool TestReduceMean1()
tUserGPU2 = ReduceMean(*sGPU, 1); tUserGPU2 = ReduceMean(*sGPU, 1);
/* check results */ /* check results */
gpuTest = tGPU1->CheckData(answer1, tUnitNum1) && tUserGPU1.CheckData(answer1, tUnitNum1) gpuTest = CheckData(tGPU1, answer1, tUnitNum1) && CheckData(&tUserGPU1, answer1, tUnitNum1)
&& tGPU2->CheckData(answer2, tUnitNum2) && tUserGPU2.CheckData(answer2, tUnitNum2); && CheckData(tGPU2, answer2, tUnitNum2) && CheckData(&tUserGPU2, answer2, tUnitNum2);
/* destroy variables */ /* destroy variables */
delete s; delete s;
......
...@@ -19,8 +19,9 @@ ...@@ -19,8 +19,9 @@
* $Created by: LI Yinqiao (email: li.yin.qiao.2012@hotmail.com) 2018-04-30 * $Created by: LI Yinqiao (email: li.yin.qiao.2012@hotmail.com) 2018-04-30
*/ */
#include "TReduceSum.h"
#include "../core/getandset/SetData.h" #include "../core/getandset/SetData.h"
#include "../core/utilities/CheckData.h"
#include "TReduceSum.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -92,8 +93,8 @@ bool TestReduceSum1() ...@@ -92,8 +93,8 @@ bool TestReduceSum1()
tUser2 = ReduceSum(*s, 1, *shift2); tUser2 = ReduceSum(*s, 1, *shift2);
/* check results */ /* check results */
cpuTest = t1->CheckData(answer1, tUnitNum1) && tUser1.CheckData(answer1, tUnitNum1) && cpuTest = CheckData(t1, answer1, tUnitNum1) && CheckData(&tUser1, answer1, tUnitNum1) &&
t2->CheckData(answer2, tUnitNum2) && tUser2.CheckData(answer2, tUnitNum2); CheckData(t2, answer2, tUnitNum2) && CheckData(&tUser2, answer2, tUnitNum2);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -122,8 +123,8 @@ bool TestReduceSum1() ...@@ -122,8 +123,8 @@ bool TestReduceSum1()
tUserGPU2 = ReduceSum(*sGPU, 1, *shiftGPU2); tUserGPU2 = ReduceSum(*sGPU, 1, *shiftGPU2);
/* check results */ /* check results */
gpuTest = tGPU1->CheckData(answer1, tUnitNum1) && tUserGPU1.CheckData(answer1, tUnitNum1) && gpuTest = CheckData(tGPU1, answer1, tUnitNum1) && CheckData(&tUserGPU1, answer1, tUnitNum1) &&
tGPU2->CheckData(answer2, tUnitNum2) && tUserGPU2.CheckData(answer2, tUnitNum2); CheckData(tGPU2, answer2, tUnitNum2) && CheckData(&tUserGPU2, answer2, tUnitNum2);
/* destroy variables */ /* destroy variables */
delete s; delete s;
...@@ -202,7 +203,7 @@ bool TestReduceSum2() ...@@ -202,7 +203,7 @@ bool TestReduceSum2()
tUser = ReduceSum(*s, 1); tUser = ReduceSum(*s, 1);
/* check results */ /* check results */
cpuTest = t->CheckData(answer->data, tUnitNum) && tUser.CheckData(answer->data, tUnitNum); cpuTest = CheckData(t, answer->data, tUnitNum) && CheckData(&tUser, answer->data, tUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -221,7 +222,7 @@ bool TestReduceSum2() ...@@ -221,7 +222,7 @@ bool TestReduceSum2()
tUserGPU = ReduceSum(*sGPU, 1); tUserGPU = ReduceSum(*sGPU, 1);
/* check results */ /* check results */
gpuTest = tGPU->CheckData(answer->data, tUnitNum) && tUserGPU.CheckData(answer->data, tUnitNum); gpuTest = CheckData(tGPU, answer->data, tUnitNum) && CheckData(&tUserGPU, answer->data, tUnitNum);
/* destroy variables */ /* destroy variables */
delete s; delete s;
...@@ -291,7 +292,7 @@ bool TestReduceSum3() ...@@ -291,7 +292,7 @@ bool TestReduceSum3()
tUser = ReduceSum(*s, 1); tUser = ReduceSum(*s, 1);
/* check results */ /* check results */
cpuTest = t->CheckData(answer->data, tUnitNum) && tUser.CheckData(answer->data, tUnitNum); cpuTest = CheckData(t, answer->data, tUnitNum) && CheckData(&tUser, answer->data, tUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -310,7 +311,7 @@ bool TestReduceSum3() ...@@ -310,7 +311,7 @@ bool TestReduceSum3()
tUserGPU = ReduceSum(*sGPU, 1); tUserGPU = ReduceSum(*sGPU, 1);
/* check results */ /* check results */
gpuTest = tGPU->CheckData(answer->data, tUnitNum) && tUserGPU.CheckData(answer->data, tUnitNum); gpuTest = CheckData(tGPU, answer->data, tUnitNum) && CheckData(&tUserGPU, answer->data, tUnitNum);
/* destroy variables */ /* destroy variables */
delete s; delete s;
...@@ -380,7 +381,7 @@ bool TestReduceSum4() ...@@ -380,7 +381,7 @@ bool TestReduceSum4()
tUser = ReduceSum(*s, 1); tUser = ReduceSum(*s, 1);
/* check results */ /* check results */
cpuTest = t->CheckData(answer->data, tUnitNum) && tUser.CheckData(answer->data, tUnitNum); cpuTest = CheckData(t, answer->data, tUnitNum) && CheckData(&tUser, answer->data, tUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -399,7 +400,7 @@ bool TestReduceSum4() ...@@ -399,7 +400,7 @@ bool TestReduceSum4()
tUserGPU = ReduceSum(*sGPU, 1); tUserGPU = ReduceSum(*sGPU, 1);
/* check results */ /* check results */
gpuTest = tGPU->CheckData(answer->data, tUnitNum) && tUserGPU.CheckData(answer->data, tUnitNum); gpuTest = CheckData(tGPU, answer->data, tUnitNum) && CheckData(&tUserGPU, answer->data, tUnitNum);
/* destroy variables */ /* destroy variables */
delete s; delete s;
...@@ -471,7 +472,7 @@ bool TestReduceSum5() ...@@ -471,7 +472,7 @@ bool TestReduceSum5()
tUser = ReduceSum(*s, 1); tUser = ReduceSum(*s, 1);
/* check results */ /* check results */
cpuTest = t->CheckData(answer->data, tUnitNum) && tUser.CheckData(answer->data, tUnitNum); cpuTest = CheckData(t, answer->data, tUnitNum) && CheckData(&tUser, answer->data, tUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -490,7 +491,7 @@ bool TestReduceSum5() ...@@ -490,7 +491,7 @@ bool TestReduceSum5()
tUserGPU = ReduceSum(*sGPU, 1); tUserGPU = ReduceSum(*sGPU, 1);
/* check results */ /* check results */
gpuTest = tGPU->CheckData(answer->data, tUnitNum) && tUserGPU.CheckData(answer->data, tUnitNum); gpuTest = CheckData(tGPU, answer->data, tUnitNum) && CheckData(&tUserGPU, answer->data, tUnitNum);
/* destroy variables */ /* destroy variables */
delete s; delete s;
...@@ -563,7 +564,7 @@ bool TestReduceSum6() ...@@ -563,7 +564,7 @@ bool TestReduceSum6()
tUser = ReduceSum(*s, 1); tUser = ReduceSum(*s, 1);
/* check results */ /* check results */
cpuTest = t->CheckData(answer->data, tUnitNum) && tUser.CheckData(answer->data, tUnitNum); cpuTest = CheckData(t, answer->data, tUnitNum) && CheckData(&tUser, answer->data, tUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -582,7 +583,7 @@ bool TestReduceSum6() ...@@ -582,7 +583,7 @@ bool TestReduceSum6()
tUserGPU = ReduceSum(*sGPU, 1); tUserGPU = ReduceSum(*sGPU, 1);
/* check results */ /* check results */
gpuTest = tGPU->CheckData(answer->data, tUnitNum) && tUserGPU.CheckData(answer->data, tUnitNum); gpuTest = CheckData(tGPU, answer->data, tUnitNum) && CheckData(&tUserGPU, answer->data, tUnitNum);
/* destroy variables */ /* destroy variables */
delete s; delete s;
......
...@@ -19,8 +19,8 @@ ...@@ -19,8 +19,8 @@
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-09-27 * $Created by: Xu Chen (email: hello_master1954@163.com) 2018-09-27
*/ */
#include "TReduceSumAll.h"
#include <math.h> #include <math.h>
#include "TReduceSumAll.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27 * $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27
*/ */
#include "../core/utilities/CheckData.h"
#include "TReduceSumSquared.h" #include "TReduceSumSquared.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -82,7 +83,7 @@ bool TestReduceSumSquared1() ...@@ -82,7 +83,7 @@ bool TestReduceSumSquared1()
tUser = ReduceSumSquared(*s, 0, *shift); tUser = ReduceSumSquared(*s, 0, *shift);
/* check results */ /* check results */
cpuTest = t->CheckData(answer, tUnitNum) && tUser.CheckData(answer, tUnitNum); cpuTest = CheckData(t, answer, tUnitNum) && CheckData(&tUser, answer, tUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -104,7 +105,7 @@ bool TestReduceSumSquared1() ...@@ -104,7 +105,7 @@ bool TestReduceSumSquared1()
tUserGPU = ReduceSumSquared(*sGPU, 0, *shiftGPU); tUserGPU = ReduceSumSquared(*sGPU, 0, *shiftGPU);
/* check results */ /* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum) && tUserGPU.CheckData(answer, tUnitNum); gpuTest = CheckData(tGPU, answer, tUnitNum) && CheckData(&tUserGPU, answer, tUnitNum);
/* destroy variables */ /* destroy variables */
delete s; delete s;
...@@ -190,7 +191,7 @@ bool TestReduceSumSquared2() ...@@ -190,7 +191,7 @@ bool TestReduceSumSquared2()
tUser = ReduceSumSquared(*s, 1, *shift); tUser = ReduceSumSquared(*s, 1, *shift);
/* check results */ /* check results */
cpuTest = t->CheckData(answer, tUnitNum) && tUser.CheckData(answer, tUnitNum); cpuTest = CheckData(t, answer, tUnitNum) && CheckData(&tUser, answer, tUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -212,7 +213,7 @@ bool TestReduceSumSquared2() ...@@ -212,7 +213,7 @@ bool TestReduceSumSquared2()
tUserGPU = ReduceSumSquared(*sGPU, 1, *shiftGPU); tUserGPU = ReduceSumSquared(*sGPU, 1, *shiftGPU);
/* check results */ /* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum) && tUserGPU.CheckData(answer, tUnitNum); gpuTest = CheckData(tGPU, answer, tUnitNum) && CheckData(&tUserGPU, answer, tUnitNum);
/* destroy variables */ /* destroy variables */
delete s; delete s;
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27 * $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27
*/ */
#include "../core/utilities/CheckData.h"
#include "TReduceVariance.h" #include "TReduceVariance.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -82,7 +83,7 @@ bool TestReduceVariance1() ...@@ -82,7 +83,7 @@ bool TestReduceVariance1()
tUser = ReduceVariance(*s, 0, *mean); tUser = ReduceVariance(*s, 0, *mean);
/* check results */ /* check results */
cpuTest = t->CheckData(answer, tUnitNum) && tUser.CheckData(answer, tUnitNum); cpuTest = CheckData(t, answer, tUnitNum) && CheckData(&tUser, answer, tUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -104,7 +105,7 @@ bool TestReduceVariance1() ...@@ -104,7 +105,7 @@ bool TestReduceVariance1()
tUserGPU = ReduceVariance(*sGPU, 0, *meanGPU); tUserGPU = ReduceVariance(*sGPU, 0, *meanGPU);
/* check results */ /* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum) && tUserGPU.CheckData(answer, tUnitNum); gpuTest = CheckData(tGPU, answer, tUnitNum) && CheckData(&tUserGPU, answer, tUnitNum);
/* destroy variables */ /* destroy variables */
delete s; delete s;
......
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
*/ */
#include "../core/math/Unary.h" #include "../core/math/Unary.h"
#include "../core/utilities/CheckData.h"
#include "TRound.h" #include "TRound.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -68,9 +69,9 @@ bool TestRound1() ...@@ -68,9 +69,9 @@ bool TestRound1()
//bUser = Round(*a); //bUser = Round(*a);
/* check results */ /* check results */
cpuTest = b->CheckData(answer, unitNum, 1e-4F) && cpuTest = CheckData(b, answer, unitNum, 1e-4F) &&
aMe->CheckData(answer, unitNum, 1e-4F) && CheckData(aMe, answer, unitNum, 1e-4F) &&
bUser.CheckData(answer, unitNum, 1e-4F); CheckData(&bUser, answer, unitNum, 1e-4F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -92,9 +93,9 @@ bool TestRound1() ...@@ -92,9 +93,9 @@ bool TestRound1()
//bUserGPU = Round(*aGPU); //bUserGPU = Round(*aGPU);
/* check results */ /* check results */
gpuTest = bGPU->CheckData(answer, unitNum, 1e-4F) && gpuTest = CheckData(bGPU, answer, unitNum, 1e-4F) &&
aMeGPU->CheckData(answer, unitNum, 1e-4F) && CheckData(aMeGPU, answer, unitNum, 1e-4F) &&
bUserGPU.CheckData(answer, unitNum, 1e-4F); CheckData(&bUserGPU, answer, unitNum, 1e-4F);
/* destroy variables */ /* destroy variables */
delete a; delete a;
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27 * $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27
*/ */
#include "../core/utilities/CheckData.h"
#include "TScaleAndShift.h" #include "TScaleAndShift.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -66,8 +67,8 @@ bool TestScaleAndShift1() ...@@ -66,8 +67,8 @@ bool TestScaleAndShift1()
tUser = ScaleAndShift(*s, scaleFactor, shiftFactor); tUser = ScaleAndShift(*s, scaleFactor, shiftFactor);
/* check results */ /* check results */
cpuTest = t->CheckData(answer, sUnitNum) && cpuTest = CheckData(t, answer, sUnitNum) &&
tMe->CheckData(answer, sUnitNum) && tUser.CheckData(answer, sUnitNum); CheckData(tMe, answer, sUnitNum) && CheckData(&tUser, answer, sUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -89,8 +90,8 @@ bool TestScaleAndShift1() ...@@ -89,8 +90,8 @@ bool TestScaleAndShift1()
tUserGPU = ScaleAndShift(*sGPU, scaleFactor, shiftFactor); tUserGPU = ScaleAndShift(*sGPU, scaleFactor, shiftFactor);
/* check results */ /* check results */
gpuTest = tGPU->CheckData(answer, sUnitNum) && gpuTest = CheckData(tGPU, answer, sUnitNum) &&
tMeGPU->CheckData(answer, sUnitNum) && tUserGPU.CheckData(answer, sUnitNum); CheckData(tMeGPU, answer, sUnitNum) && CheckData(&tUserGPU, answer, sUnitNum);
/* destroy variables */ /* destroy variables */
delete s; delete s;
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-07-04 * $Created by: Xu Chen (email: hello_master1954@163.com) 2018-07-04
*/ */
#include "../core/utilities/CheckData.h"
#include "TSelect.h" #include "TSelect.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -78,7 +79,7 @@ bool TestSelect1() ...@@ -78,7 +79,7 @@ bool TestSelect1()
tUser = SelectRange(*s, 2, 1, 3); tUser = SelectRange(*s, 2, 1, 3);
/* check results */ /* check results */
cpuTest = t->CheckData(answer, tUnitNum) && tUser.CheckData(answer, tUnitNum); cpuTest = CheckData(t, answer, tUnitNum) && CheckData(&tUser, answer, tUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -98,7 +99,7 @@ bool TestSelect1() ...@@ -98,7 +99,7 @@ bool TestSelect1()
tUserGPU = SelectRange(*sGPU, 2, 1, 3); tUserGPU = SelectRange(*sGPU, 2, 1, 3);
/* check results */ /* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum) && tUserGPU.CheckData(answer, tUnitNum); gpuTest = CheckData(tGPU, answer, tUnitNum) && CheckData(&tUserGPU, answer, tUnitNum);
/* destroy variables */ /* destroy variables */
delete s; delete s;
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-07-06 * $Created by: Xu Chen (email: hello_master1954@163.com) 2018-07-06
*/ */
#include "../core/utilities/CheckData.h"
#include "TSetAscendingOrder.h" #include "TSetAscendingOrder.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -52,7 +53,7 @@ bool TestSetAscendingOrder1() ...@@ -52,7 +53,7 @@ bool TestSetAscendingOrder1()
s->SetAscendingOrder(1); s->SetAscendingOrder(1);
/* check results */ /* check results */
cpuTest = s->CheckData(answer, sUnitNum); cpuTest = CheckData(s, answer, sUnitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -68,7 +69,7 @@ bool TestSetAscendingOrder1() ...@@ -68,7 +69,7 @@ bool TestSetAscendingOrder1()
sGPU->SetAscendingOrder(1); sGPU->SetAscendingOrder(1);
/* check results */ /* check results */
gpuTest = sGPU->CheckData(answer, sUnitNum); gpuTest = CheckData(sGPU, answer, sUnitNum);
/* destroy variables */ /* destroy variables */
delete s; delete s;
......
...@@ -19,8 +19,9 @@ ...@@ -19,8 +19,9 @@
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-07-06 * $Created by: Xu Chen (email: hello_master1954@163.com) 2018-07-06
*/ */
#include "TSetData.h" #include "../core/utilities/CheckData.h"
#include "../core/getandset/SetData.h" #include "../core/getandset/SetData.h"
#include "TSetData.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -52,7 +53,7 @@ bool TestSetData1() ...@@ -52,7 +53,7 @@ bool TestSetData1()
s->SetDataRand(0.0, 1.0); s->SetDataRand(0.0, 1.0);
/* check results */ /* check results */
cpuTest = s->CheckData(answer, sUnitNum, 1.0F); cpuTest = CheckData(s, answer, sUnitNum, 1.0F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -64,7 +65,7 @@ bool TestSetData1() ...@@ -64,7 +65,7 @@ bool TestSetData1()
/* call SetDataRand function */ /* call SetDataRand function */
sGPU->SetDataRand(0.0, 1.0); sGPU->SetDataRand(0.0, 1.0);
gpuTest = sGPU->CheckData(answer, sUnitNum, 1.0F); gpuTest = CheckData(sGPU, answer, sUnitNum, 1.0F);
/* destroy variables */ /* destroy variables */
delete s; delete s;
...@@ -125,7 +126,7 @@ bool TestSetData2() ...@@ -125,7 +126,7 @@ bool TestSetData2()
_SetDataIndexed(s, modify, 0, 1); _SetDataIndexed(s, modify, 0, 1);
/* check results */ /* check results */
cpuTest = s->CheckData(answer, sUnitNum, 1e-5F); cpuTest = CheckData(s, answer, sUnitNum, 1e-5F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -142,7 +143,7 @@ bool TestSetData2() ...@@ -142,7 +143,7 @@ bool TestSetData2()
/* call SetDataIndexed function */ /* call SetDataIndexed function */
_SetDataIndexed(sGPU, modifyGPU, 0, 1); _SetDataIndexed(sGPU, modifyGPU, 0, 1);
gpuTest = sGPU->CheckData(answer, sUnitNum, 1e-5F); gpuTest = CheckData(sGPU, answer, sUnitNum, 1e-5F);
/* destroy variables */ /* destroy variables */
delete s; delete s;
...@@ -219,7 +220,7 @@ bool TestSetData3() ...@@ -219,7 +220,7 @@ bool TestSetData3()
_SetDataIndexed(s, modify, 1, 1); _SetDataIndexed(s, modify, 1, 1);
/* check results */ /* check results */
cpuTest = s->CheckData(answer, sUnitNum, 1e-5F); cpuTest = CheckData(s, answer, sUnitNum, 1e-5F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -236,7 +237,7 @@ bool TestSetData3() ...@@ -236,7 +237,7 @@ bool TestSetData3()
/* call SetDataIndexed function */ /* call SetDataIndexed function */
_SetDataIndexed(sGPU, modifyGPU, 1, 1); _SetDataIndexed(sGPU, modifyGPU, 1, 1);
gpuTest = sGPU->CheckData(answer, sUnitNum, 1e-5F); gpuTest = CheckData(sGPU, answer, sUnitNum, 1e-5F);
/* destroy variables */ /* destroy variables */
delete s; delete s;
...@@ -294,7 +295,7 @@ bool TestSetData4() ...@@ -294,7 +295,7 @@ bool TestSetData4()
_SetDataDim(s, 1, 1, 0, 0); _SetDataDim(s, 1, 1, 0, 0);
/* check results */ /* check results */
cpuTest = s->CheckData(answer, unitNum, 1e-4F); cpuTest = CheckData(s, answer, unitNum, 1e-4F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -309,7 +310,7 @@ bool TestSetData4() ...@@ -309,7 +310,7 @@ bool TestSetData4()
/* call _SetDataDim function */ /* call _SetDataDim function */
_SetDataDim(sGPU, 1, 1, 0, 0); _SetDataDim(sGPU, 1, 1, 0, 0);
gpuTest = sGPU->CheckData(answer, unitNum, 1e-4F); gpuTest = CheckData(sGPU, answer, unitNum, 1e-4F);
/* destroy variables */ /* destroy variables */
delete s; delete s;
...@@ -374,7 +375,7 @@ bool TestSetData5() ...@@ -374,7 +375,7 @@ bool TestSetData5()
_SetDataDim(s, 2, 1, 1, 5.0F); _SetDataDim(s, 2, 1, 1, 5.0F);
/* check results */ /* check results */
cpuTest = s->CheckData(answer, unitNum, 1e-4F); cpuTest = CheckData(s, answer, unitNum, 1e-4F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -389,7 +390,7 @@ bool TestSetData5() ...@@ -389,7 +390,7 @@ bool TestSetData5()
/* call _SetDataDim function */ /* call _SetDataDim function */
_SetDataDim(sGPU, 2, 1, 1, 5.0F); _SetDataDim(sGPU, 2, 1, 1, 5.0F);
gpuTest = sGPU->CheckData(answer, unitNum, 1e-4F); gpuTest = CheckData(sGPU, answer, unitNum, 1e-4F);
/* destroy variables */ /* destroy variables */
delete s; delete s;
...@@ -436,7 +437,7 @@ bool TestSetData6() ...@@ -436,7 +437,7 @@ bool TestSetData6()
_SetDataRange(s, 5.2, -3.2, -2); _SetDataRange(s, 5.2, -3.2, -2);
/* check results */ /* check results */
cpuTest = s->CheckData(answer, unitNum, 1e-4F); cpuTest = CheckData(s, answer, unitNum, 1e-4F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -451,7 +452,7 @@ bool TestSetData6() ...@@ -451,7 +452,7 @@ bool TestSetData6()
/* call _SetDataRange function */ /* call _SetDataRange function */
_SetDataRange(sGPU, 5.2, -3.2, -2); _SetDataRange(sGPU, 5.2, -3.2, -2);
gpuTest = sGPU->CheckData(answer, unitNum, 1e-4F); gpuTest = CheckData(sGPU, answer, unitNum, 1e-4F);
/* destroy variables */ /* destroy variables */
delete s; delete s;
......
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
*/ */
#include "../XUtility.h" #include "../XUtility.h"
#include "../core/utilities/CheckData.h"
#include "TSigmoid.h" #include "TSigmoid.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -59,8 +60,8 @@ bool TestSigmoid1() ...@@ -59,8 +60,8 @@ bool TestSigmoid1()
yUser = Sigmoid(*x); yUser = Sigmoid(*x);
/* check result */ /* check result */
cpuTest = y->CheckData(answer, unitNum, 1e-4F) && cpuTest = CheckData(y, answer, unitNum, 1e-4F) &&
yUser.CheckData(answer, unitNum, 1e-4F); CheckData(&yUser, answer, unitNum, 1e-4F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -80,8 +81,8 @@ bool TestSigmoid1() ...@@ -80,8 +81,8 @@ bool TestSigmoid1()
yUserGPU = Sigmoid(*xGPU); yUserGPU = Sigmoid(*xGPU);
/* check result */ /* check result */
gpuTest = yGPU->CheckData(answer, unitNum, 1e-4F) && gpuTest = CheckData(yGPU, answer, unitNum, 1e-4F) &&
yUserGPU.CheckData(answer, unitNum, 1e-4F); CheckData(&yUserGPU, answer, unitNum, 1e-4F);
/* destroy variables */ /* destroy variables */
delete x; delete x;
...@@ -147,8 +148,8 @@ bool TestSigmoid2() ...@@ -147,8 +148,8 @@ bool TestSigmoid2()
_SigmoidBackward(y, x, dedy, dedx); _SigmoidBackward(y, x, dedy, dedx);
/* check result */ /* check result */
cpuTest = y->CheckData(yAnswer, unitNum, 1e-4F) && cpuTest = CheckData(y, yAnswer, unitNum, 1e-4F) &&
dedx->CheckData(dedxAnswer, unitNum, 1e-4F); CheckData(dedx, dedxAnswer, unitNum, 1e-4F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -173,8 +174,8 @@ bool TestSigmoid2() ...@@ -173,8 +174,8 @@ bool TestSigmoid2()
_SigmoidBackward(yGPU, xGPU, dedyGPU, dedxGPU); _SigmoidBackward(yGPU, xGPU, dedyGPU, dedxGPU);
/* check result */ /* check result */
gpuTest = yGPU->CheckData(yAnswer, unitNum, 1e-4F) && gpuTest = CheckData(yGPU, yAnswer, unitNum, 1e-4F) &&
dedxGPU->CheckData(dedxAnswer, unitNum, 1e-4F); CheckData(dedxGPU, dedxAnswer, unitNum, 1e-4F);
/* destroy variables */ /* destroy variables */
delete x; delete x;
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-07-12 * $Created by: Xu Chen (email: hello_master1954@163.com) 2018-07-12
*/ */
#include "../core/utilities/CheckData.h"
#include "TSign.h" #include "TSign.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -65,7 +66,7 @@ bool TestSign1() ...@@ -65,7 +66,7 @@ bool TestSign1()
bUser = Sign(*a); bUser = Sign(*a);
/* check results */ /* check results */
cpuTest = b->CheckData(answer, aUnitNum, 1e-4F) && aMe->CheckData(answer, aUnitNum, 1e-4F) && bUser.CheckData(answer, aUnitNum, 1e-4F); cpuTest = CheckData(b, answer, aUnitNum, 1e-4F) && CheckData(aMe, answer, aUnitNum, 1e-4F) && CheckData(&bUser, answer, aUnitNum, 1e-4F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -87,8 +88,7 @@ bool TestSign1() ...@@ -87,8 +88,7 @@ bool TestSign1()
bUserGPU = Sign(*aGPU); bUserGPU = Sign(*aGPU);
/* check results */ /* check results */
gpuTest = bGPU->CheckData(answer, aUnitNum, 1e-4F) && aMeGPU->CheckData(answer, aUnitNum, 1e-4F) && bUserGPU.CheckData(answer, aUnitNum, 1e-4F); gpuTest = CheckData(bGPU, answer, aUnitNum, 1e-4F) && CheckData(aMeGPU, answer, aUnitNum, 1e-4F) && CheckData(&bUserGPU, answer, aUnitNum, 1e-4F);
/* destroy variables */ /* destroy variables */
delete a; delete a;
delete b; delete b;
......
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
*/ */
#include "../core/math/Unary.h" #include "../core/math/Unary.h"
#include "../core/utilities/CheckData.h"
#include "TSin.h" #include "TSin.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -66,7 +67,7 @@ bool TestSin1() ...@@ -66,7 +67,7 @@ bool TestSin1()
bUser = Sin(*a); bUser = Sin(*a);
/* check results */ /* check results */
cpuTest = b->CheckData(answer, unitNum, 1e-4F) && aMe->CheckData(answer, unitNum, 1e-4F) && bUser.CheckData(answer, unitNum, 1e-4F); cpuTest = CheckData(b, answer, unitNum, 1e-4F) && CheckData(aMe, answer, unitNum, 1e-4F) && CheckData(&bUser, answer, unitNum, 1e-4F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -88,7 +89,7 @@ bool TestSin1() ...@@ -88,7 +89,7 @@ bool TestSin1()
bUserGPU = Sin(*aGPU); bUserGPU = Sin(*aGPU);
/* check results */ /* check results */
gpuTest = bGPU->CheckData(answer, unitNum, 1e-4F) && aMeGPU->CheckData(answer, unitNum, 1e-4F) && bUserGPU.CheckData(answer, unitNum, 1e-4F); gpuTest = CheckData(bGPU, answer, unitNum, 1e-4F) && CheckData(aMeGPU, answer, unitNum, 1e-4F) && CheckData(&bUserGPU, answer, unitNum, 1e-4F);
/* destroy variables */ /* destroy variables */
delete a; delete a;
......
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include "../XTensor.h" #include "../XTensor.h"
#include "../XUtility.h" #include "../XUtility.h"
#include "../core/utilities/CheckData.h"
#include "TSoftmax.h" #include "TSoftmax.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -63,7 +64,7 @@ bool TestSoftmax1() ...@@ -63,7 +64,7 @@ bool TestSoftmax1()
yUser = Softmax(*x, 1); yUser = Softmax(*x, 1);
/* check result */ /* check result */
cpuTest = y->CheckData(answer, unitNum, 1e-4F) && yUser.CheckData(answer, unitNum, 1e-4F); cpuTest = CheckData(y, answer, unitNum, 1e-4F) && CheckData(&yUser, answer, unitNum, 1e-4F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -83,7 +84,7 @@ bool TestSoftmax1() ...@@ -83,7 +84,7 @@ bool TestSoftmax1()
yUserGPU = Softmax(*xGPU, 1); yUserGPU = Softmax(*xGPU, 1);
/* check result */ /* check result */
gpuTest = yGPU->CheckData(answer, unitNum, 1e-4F) && yUserGPU.CheckData(answer, unitNum, 1e-4F); gpuTest = CheckData(yGPU, answer, unitNum, 1e-4F) && CheckData(&yUserGPU, answer, unitNum, 1e-4F);
/* destroy variables */ /* destroy variables */
delete x; delete x;
...@@ -149,8 +150,8 @@ bool TestSoftmax2() ...@@ -149,8 +150,8 @@ bool TestSoftmax2()
_SoftmaxBackward(g, y, x, dedy, dedx, NULL, 1, CROSSENTROPY); _SoftmaxBackward(g, y, x, dedy, dedx, NULL, 1, CROSSENTROPY);
/* check result */ /* check result */
cpuTest = y->CheckData(yAnswer, unitNum, 1e-4F) cpuTest = CheckData(y, yAnswer, unitNum, 1e-4F)
&& dedx->CheckData(dedxAnswer, unitNum, 1e-4F); && CheckData(dedx, dedxAnswer, unitNum, 1e-4F);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -177,8 +178,8 @@ bool TestSoftmax2() ...@@ -177,8 +178,8 @@ bool TestSoftmax2()
_SoftmaxBackward(gGPU, yGPU, xGPU, dedyGPU, dedxGPU, NULL, 1, CROSSENTROPY); _SoftmaxBackward(gGPU, yGPU, xGPU, dedyGPU, dedxGPU, NULL, 1, CROSSENTROPY);
/* check result */ /* check result */
gpuTest = yGPU->CheckData(yAnswer, unitNum, 1e-4F) gpuTest = CheckData(yGPU, yAnswer, unitNum, 1e-4F)
&& dedxGPU->CheckData(dedxAnswer, unitNum, 1e-4F); && CheckData(dedxGPU, dedxAnswer, unitNum, 1e-4F);
/* destroy variables */ /* destroy variables */
delete x; delete x;
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
* $Created by: LI Yinqiao (li.yin.qiao.2012@hotmail.com) 2018-04-30 * $Created by: LI Yinqiao (li.yin.qiao.2012@hotmail.com) 2018-04-30
*/ */
#include "../core/utilities/CheckData.h"
#include "TSort.h" #include "TSort.h"
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
...@@ -61,9 +62,9 @@ bool TestSort1() ...@@ -61,9 +62,9 @@ bool TestSort1()
_SortMe(aMe, index, 0); _SortMe(aMe, index, 0);
Sort(*a, bUser, *index, 0); Sort(*a, bUser, *index, 0);
cpuTest = b->CheckData(answer, unitNum) && cpuTest = CheckData(b, answer, unitNum) &&
aMe->CheckData(answer, unitNum) && CheckData(aMe, answer, unitNum) &&
bUser.CheckData(answer, unitNum); CheckData(&bUser, answer, unitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -87,9 +88,9 @@ bool TestSort1() ...@@ -87,9 +88,9 @@ bool TestSort1()
Sort(*aGPU, bUserGPU, *indexGPU, 0); Sort(*aGPU, bUserGPU, *indexGPU, 0);
/* check results */ /* check results */
gpuTest = bGPU->CheckData(answer, unitNum) && gpuTest = CheckData(bGPU, answer, unitNum) &&
aMeGPU->CheckData(answer, unitNum) && CheckData(aMeGPU, answer, unitNum) &&
bUserGPU.CheckData(answer, unitNum); CheckData(&bUserGPU, answer, unitNum);
/* destroy variables */ /* destroy variables */
delete a; delete a;
...@@ -153,9 +154,9 @@ bool TestSort2() ...@@ -153,9 +154,9 @@ bool TestSort2()
Sort(*a, bUser, *index, 1); Sort(*a, bUser, *index, 1);
/* check results */ /* check results */
cpuTest = b->CheckData(answer, unitNum) && cpuTest = CheckData(b, answer, unitNum) &&
aMe->CheckData(answer, unitNum) && CheckData(aMe, answer, unitNum) &&
bUser.CheckData(answer, unitNum); CheckData(&bUser, answer, unitNum);
#ifdef USE_CUDA #ifdef USE_CUDA
/* GPU test */ /* GPU test */
...@@ -179,9 +180,9 @@ bool TestSort2() ...@@ -179,9 +180,9 @@ bool TestSort2()
Sort(*aGPU, bUserGPU, *indexGPU, 1); Sort(*aGPU, bUserGPU, *indexGPU, 1);
/* check results */ /* check results */
gpuTest = bGPU->CheckData(answer, unitNum) && gpuTest = CheckData(bGPU, answer, unitNum) &&
aMeGPU->CheckData(answer, unitNum) && CheckData(aMeGPU, answer, unitNum) &&
bUserGPU.CheckData(answer, unitNum); CheckData(&bUserGPU, answer, unitNum);
/* destroy variables */ /* destroy variables */
delete a; delete a;
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论