Commit b3ecba16 by liyinqiao

Update IsSameShaped function.

parent ddeebb47
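This commit renames the pointer-based shape check IsSameShaped(const XTensor*, const XTensor*) to _IsSameShaped, adds reference-based IsSameShaped(const XTensor&, const XTensor&) overloads (two- and three-tensor variants), and updates the call sites throughout the diff below. The naming follows the library-wide convention already visible here: underscore-prefixed functions (_Div, _Sum, _Multiply, ...) take XTensor pointers, while the plain-named wrappers take references. A minimal sketch of the two calling styles after this commit; the InitTensor2D setup is illustrative only:

#include "XTensor.h"
using namespace nts;

void Example()
{
    XTensor a, b;
    InitTensor2D(&a, 2, 3, X_FLOAT);   // 2x3 float tensors (illustrative setup)
    InitTensor2D(&b, 2, 3, X_FLOAT);

    bool p = _IsSameShaped(&a, &b);    // low-level pointer interface (formerly IsSameShaped)
    bool r = IsSameShaped(a, b);       // new reference overload; forwards to _IsSameShaped
}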
......@@ -381,7 +381,7 @@ void XMathGrad::GradDiv(XTensor * node, bool isEfficient)
XNoder::MakeGrad(a);
XNoder::MakeGrad(b);
-CheckNTErrors(IsSameShaped(a, b), "Wrong sized input tensors!");
+CheckNTErrors(_IsSameShaped(a, b), "Wrong sized input tensors!");
_Div(node->grad, b, a->grad, 1.0F);
......@@ -726,7 +726,7 @@ void XMathGrad::GradMultiply(XTensor * node, bool isEfficient)
XTensor * a = income.tails[0];
XTensor * b = income.tails[1];
-CheckNTErrors(IsSameShaped(a, b), "Wrong sized input tensors!");
+CheckNTErrors(_IsSameShaped(a, b), "Wrong sized input tensors!");
if (!isEfficient || a->isGrad) {
XNoder::MakeGrad(a);
......
......@@ -281,7 +281,7 @@ void XShapeGrad::GradMergeList(XTensor * node, bool isEfficient)
smallsGrad.Add(tail->grad);
if(i > 1){
-CheckNTErrors(IsSameShaped(last, tail),
+CheckNTErrors(_IsSameShaped(last, tail),
"Input tensors must be of the same size!");
}
......
......@@ -29,7 +29,7 @@ void XNoder::MakeGrad(XTensor * node)
if(node == NULL)
return;
-if(!IsSameShaped(node, node->grad)){
+if(!_IsSameShaped(node, node->grad)){
delete node->grad;
node->grad = NewTensor(node);
node->grad->SetZeroAll();
......
......@@ -319,7 +319,7 @@ void T2TSearch::Generate(T2TStateBundle * beam)
for (int i = 0; i < indexGPU.unitNum; i++)
indexGPU.SetInt(i * stride + indexGPU.GetInt(i), i);
-CheckNTErrors(IsSameShaped(&prob, &probPath), "Wrong tensor shape!");
+CheckNTErrors(IsSameShaped(prob, probPath), "Wrong tensor shape!");
/* sequence probability of top-k candidates */
XTensor probPathTopK;
......
......@@ -360,7 +360,7 @@ XTensor& XTensor::operator= (const XTensor& tensor)
XLink::ClearOutgoing(this);
XLink::ClearIncoming(this);
-if(!IsSameShaped(this, &tensor))
+if(!_IsSameShaped(this, &tensor))
Resize(tensor.order, tensor.dimSize, tensor.dataType, tensor.denseRatio);
_Identity(&tensor, this);
......
......@@ -93,5 +93,6 @@
#include "utilities/XMatrixSegment.h"
#include "utilities/FlushToMem.h"
#include "utilities/CheckData.h"
#include "utilities/SetAscendingOrder.h"
#endif // __CHEADER_H__
......@@ -169,7 +169,7 @@ int GetDivDimIndex(const XTensor &a, const XTensor &b)
{
if(a.order < b.order)
return -1;
-if(IsSameShaped(&a, &b))
+if(IsSameShaped(a, b))
return -1;
int hitCount = 0;
......@@ -254,7 +254,7 @@ where i is the index of the item
*/
void Div(const XTensor &a, const XTensor &b, XTensor &c, DTYPE alpha, int leadingDim)
{
-if (!c.isInit || !IsSameShaped(&a, &c)) {
+if (!c.isInit || !IsSameShaped(a, c)) {
InitTensor(&c, &a);
}
......
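The Div wrapper above shows the output-initialization idiom that recurs throughout this commit: if the output tensor is uninitialized, or its shape and type differ from the input's, it is re-initialized to match before the underscore-prefixed pointer routine runs. A hedged sketch of the idiom, with DivLike as a hypothetical stand-in name:

void DivLike(const XTensor &a, const XTensor &b, XTensor &c, DTYPE alpha, int leadingDim)
{
    // (Re)initialize c to a's shape and type when c is unset or mismatched.
    if (!c.isInit || !IsSameShaped(a, c)) {
        InitTensor(&c, &a);
    }

    // The low-level pointer routine does the actual element-wise division.
    _Div(&a, &b, &c, alpha, leadingDim);
}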
......@@ -57,7 +57,7 @@ void _DivDim(const XTensor * a, const XTensor * b, XTensor * c, int n, DTYPE alp
CheckDev(a->devID, b->devID);
-if(IsSameShaped(a, b)){
+if(_IsSameShaped(a, b)){
_Div(a, b, c, alpha);
return;
}
......@@ -189,7 +189,7 @@ i.e., a is divided with b by broadcasting
*/
void DivDim(const XTensor &a, const XTensor &b, XTensor &c, int n, DTYPE alpha)
{
-if (!c.isInit || !IsSameShaped(&a, &c)) {
+if (!c.isInit || !IsSameShaped(a, c)) {
InitTensor(&c, &a);
}
......
......@@ -172,7 +172,7 @@ where i is the index of the element
*/
void Mask(const XTensor &a, const XTensor &mask, XTensor &c, DTYPE alpha)
{
-if (!c.isInit || !IsSameShaped(&a, &c)) {
+if (!c.isInit || !IsSameShaped(a, c)) {
InitTensor(&c, &a);
}
......
......@@ -243,9 +243,9 @@ void _MatrixMulBatchedCPU(const TensorList * a, MATRIX_TRANS_TYPE transposedA,
XTensor * ai = (XTensor*)a->GetItem(i);
XTensor * bi = (XTensor*)b->GetItem(i);
XTensor * ci = (XTensor*)c->GetItem(i);
-if (!IsSameShaped(aim, ai) ||
-    !IsSameShaped(bim, bi) ||
-    !IsSameShaped(cim, ci))
+if (!_IsSameShaped(aim, ai) ||
+    !_IsSameShaped(bim, bi) ||
+    !_IsSameShaped(cim, ci))
{
isUniform = false;
break;
......
......@@ -37,7 +37,7 @@ int GetSumIndex(const XTensor &a, const XTensor &b)
{
if (a.order < b.order)
return -1;
-if (IsSameShaped(&a, &b))
+if (IsSameShaped(a, b))
return -1;
int hitCount = 0;
......
......@@ -170,7 +170,7 @@ int GetMultiplyDimIndex(const XTensor &a, const XTensor &b)
{
if(a.order < b.order)
return -1;
-if(IsSameShaped(&a, &b))
+if(IsSameShaped(a, b))
return -1;
int hitCount = 0;
......@@ -255,7 +255,7 @@ where i is the index of the item
*/
void Multiply(const XTensor &a, const XTensor &b, XTensor &c, DTYPE alpha, int leadingDim)
{
-if (!c.isInit || !IsSameShaped(&a, &c)) {
+if (!c.isInit || !IsSameShaped(a, c)) {
InitTensor(&c, &a);
}
......
......@@ -58,7 +58,7 @@ void _MultiplyDim(const XTensor * a, const XTensor * b, XTensor * c, int n, DTYP
CheckDev(a->devID, b->devID);
-if(IsSameShaped(a, b)){
+if(_IsSameShaped(a, b)){
_Multiply(a, b, c, alpha);
return;
}
......@@ -204,7 +204,7 @@ i.e., a is multiplied with b by broadcasting
*/
void MultiplyDim(const XTensor &a, const XTensor &b, XTensor &c, int n)
{
-if (!c.isInit || !IsSameShaped(&a, &c)) {
+if (!c.isInit || !IsSameShaped(a, c)) {
InitTensor(&c, &a);
}
......@@ -372,7 +372,7 @@ where some of dimensions of b can be of size 1
*/
void MultiplyBroadcast(const XTensor &a, const XTensor &b, XTensor &c)
{
-if (!c.isInit || !IsSameShaped(&a, &c)) {
+if (!c.isInit || !IsSameShaped(a, c)) {
InitTensor(&c, &a);
}
......
......@@ -150,7 +150,7 @@ int GetSubDimIndex(const XTensor &a, const XTensor &b)
{
if(a.order < b.order)
return -1;
-if(IsSameShaped(&a, &b))
+if(IsSameShaped(a, b))
return -1;
int hitCount = 0;
......@@ -224,7 +224,7 @@ tensor subtraction c = a - b * \beta
*/
void Sub(const XTensor &a, const XTensor &b, XTensor &c, DTYPE beta)
{
-if (!c.isInit || !IsSameShaped(&a, &c)) {
+if (!c.isInit || !IsSameShaped(a, c)) {
InitTensor(&c, &a);
}
......
......@@ -62,7 +62,7 @@ void _SubDim(const XTensor * a, const XTensor * b, XTensor * c, int n, DTYPE bet
return;
}
-if (IsSameShaped(a, b)) {
+if (_IsSameShaped(a, b)) {
_Sub(a, b, c, beta);
return;
}
......@@ -189,7 +189,7 @@ i.e., a is subtracted with b by broadcasting
*/
void SubDim(const XTensor &a, const XTensor &b, XTensor &c, int n, DTYPE beta)
{
-if (!c.isInit || !IsSameShaped(&a, &c)) {
+if (!c.isInit || !IsSameShaped(a, c)) {
InitTensor(&c, &a);
}
......
......@@ -184,7 +184,7 @@ int GetSumDimIndex(const XTensor &a, const XTensor &b)
{
if(a.order < b.order)
return -1;
-if(IsSameShaped(&a, &b))
+if(IsSameShaped(a, b))
return -1;
int hitCount = 0;
......@@ -257,7 +257,7 @@ tensor summation c = a + b * \beta
*/
void Sum(const XTensor &a, const XTensor &b, XTensor &c, DTYPE beta)
{
-if (!c.isInit || !IsSameShaped(&a, &c)) {
+if (!c.isInit || !IsSameShaped(a, c)) {
InitTensor(&c, &a);
}
......
......@@ -65,7 +65,7 @@ void _SumDim(const XTensor * a, const XTensor * b, XTensor * c, int n, DTYPE bet
return;
}
-if(IsSameShaped(a, b)){
+if(_IsSameShaped(a, b)){
_Sum(a, b, c, beta);
return;
}
......@@ -206,7 +206,7 @@ i.e., a is summed with b by broadcasting
*/
void SumDim(const XTensor &a, const XTensor &b, XTensor &c, int n, DTYPE beta)
{
-if (!c.isInit || !IsSameShaped(&a, &c)) {
+if (!c.isInit || !IsSameShaped(a, c)) {
InitTensor(&c, &a);
}
......@@ -375,7 +375,7 @@ c = a + b * \beta
*/
void SumBroadcast(const XTensor &a, const XTensor &b, XTensor &c, DTYPE beta)
{
-if (!c.isInit || !IsSameShaped(&a, &c)) {
+if (!c.isInit || !IsSameShaped(a, c)) {
InitTensor(&c, &a);
}
......
......@@ -225,9 +225,9 @@ void _CudaBLASMatrixMULList(cublasHandle_t * handle,
XTensor * ai = (XTensor*)a->GetItem(i);
XTensor * bi = (XTensor*)b->GetItem(i);
XTensor * ci = (XTensor*)c->GetItem(i);
-if (!IsSameShaped(aim, ai) ||
-    !IsSameShaped(bim, bi) ||
-    !IsSameShaped(cim, ci))
+if (!_IsSameShaped(aim, ai) ||
+    !_IsSameShaped(bim, bi) ||
+    !_IsSameShaped(cim, ci))
{
isUniform = false;
break;
......
......@@ -78,7 +78,7 @@ void _funcName(const XTensor * a, XTensor * b, T num)
_cudaFuncName(a, b, num); \
return; \
} \
-CheckNTErrors((IsSameShaped(a, b)), \
+CheckNTErrors((_IsSameShaped(a, b)), \
"Input tensors should have the same data type!"); \
if (a->dataType == X_INT) { \
int * d = (int*)a->data; \
......@@ -113,7 +113,7 @@ void _funcName(const XTensor * a, XTensor * b, T num)
if (a->devID >= 0) { \
ShowNTErrors("No GPU devices support!") \
} \
-CheckNTErrors((IsSameShaped(a, b)), \
+CheckNTErrors((_IsSameShaped(a, b)), \
"Input tensors should have the same data type!"); \
if (a->dataType == X_INT) { \
int * d = (int*)a->data; \
......@@ -182,7 +182,7 @@ template XTensor funcName<double>(const XTensor&, double);
template<class T> \
void funcName(const XTensor &a, XTensor &b, T num) \
{ \
-if (!b.isInit || !IsSameShaped(&a, &b)) { \
+if (!b.isInit || !IsSameShaped(a, b)) { \
InitTensor(&b, &a); \
} \
_funcName(&a, &b, num); \
......
......@@ -90,7 +90,7 @@ void Kernel##funcName(T1 * a, T1 * b, int size, T2 num)
template<class T> \
void _Cuda##funcName(const XTensor * a, XTensor * b, T num) \
{ \
-CheckNTErrors((IsSameShaped(a, b)), \
+CheckNTErrors((_IsSameShaped(a, b)), \
"Input tensors should have the same type!"); \
CheckNTErrors((a->isSparse == false), "TODO!"); \
\
......
......@@ -44,7 +44,7 @@ void _Clip(const XTensor * a, XTensor * b, DTYPE lower, DTYPE upper)
}
#endif
CheckNTErrors((IsSameShaped(a, b)), "Input tensors should have the same type!");
CheckNTErrors((_IsSameShaped(a, b)), "Input tensors should have the same type!");
CheckNTErrors((a->dataType == DEFAULT_DTYPE), "TODO!");
DTYPE * d = (DTYPE*)a->data;
......@@ -111,7 +111,7 @@ XTensor Clip(const XTensor & a, DTYPE lower, DTYPE upper)
void Clip(const XTensor & a, XTensor & b, DTYPE lower, DTYPE upper)
{
-if (!b.isInit || !IsSameShaped(&a, &b)) {
+if (!b.isInit || !IsSameShaped(a, b)) {
InitTensor(&b, &a);
}
......
......@@ -75,7 +75,7 @@ set each entry to its clip value
*/
void _CudaClip(const XTensor * a, XTensor * b, DTYPE lower, DTYPE upper)
{
CheckNTErrors((IsSameShaped(a, b)), "Input tensors should have the same type!");
CheckNTErrors((_IsSameShaped(a, b)), "Input tensors should have the same type!");
CheckNTErrors((a->isSparse == false), "TODO!");
int gridSize[3];
......
......@@ -42,7 +42,7 @@ DTYPE myIsNotEqual(DTYPE a, DTYPE b)
#define _SIMPLE_COMPARE_FUNCTION(_funcName, _cudaFuncName, origFunc) \
void _funcName(const XTensor * a, XTensor * b, DTYPE number) \
{ \
-CheckNTErrors((IsSameShaped(a, b)), \
+CheckNTErrors((_IsSameShaped(a, b)), \
"Input tensors should have the same type!"); \
CheckNTErrors((a->dataType == DEFAULT_DTYPE), "TODO!"); \
/* run it on GPUs */ \
......@@ -59,7 +59,7 @@ void _funcName(const XTensor * a, XTensor * b, DTYPE number)
#define _SIMPLE_COMPARE_FUNCTION(_funcName, origFunc) \
void _funcName(const XTensor * a, XTensor * b, DTYPE number) \
{ \
-CheckNTErrors((IsSameShaped(a, b)), \
+CheckNTErrors((_IsSameShaped(a, b)), \
"Input tensors should have the same type!"); \
CheckNTErrors((a->dataType == DEFAULT_DTYPE), "TODO!"); \
/* run it on GPUs */ \
......@@ -97,7 +97,7 @@ XTensor funcName(const XTensor &a, DTYPE number)
#define SIMPLE_COMPARE_FUNCTION_VOID(funcName, _funcName, operationId) \
void funcName(const XTensor &a, XTensor &b, DTYPE number) \
{ \
-if (!b.isInit || !IsSameShaped(&a, &b)) { \
+if (!b.isInit || !IsSameShaped(a, b)) { \
InitTensor(&b, &a); \
} \
_funcName(&a, &b, number); \
......
......@@ -48,9 +48,9 @@ void _Normalize(const XTensor * input, XTensor * output, int dim,
const XTensor * a, const XTensor * b, DTYPE epsilon)
{
int dimRDI = input->order - dim - 1;
-CheckNTErrors((IsSameShaped(input, output)), "Unmatched input tensors!");
-CheckNTErrors((IsSameShaped(a, b)), "Unmatched input tensors");
-CheckNTErrors((IsSameShaped(mean, var)), "Unmatched input tensors");
+CheckNTErrors((_IsSameShaped(input, output)), "Unmatched input tensors!");
+CheckNTErrors((_IsSameShaped(a, b)), "Unmatched input tensors");
+CheckNTErrors((_IsSameShaped(mean, var)), "Unmatched input tensors");
CheckNTErrors((input && output && mean && var && a && b), "Empty input tensors!");
CheckNTErrors((dimRDI >= 0 && dimRDI < input->order), "Incorrect reduction dimension!");
CheckNTErrors((input->order == mean->order + 1), "Incorrect reduction dimension!");
......@@ -204,7 +204,7 @@ void Normalize(const XTensor &input, XTensor &output, int dim,
const XTensor &mean, const XTensor &var,
const XTensor &a, const XTensor &b, DTYPE epsilon)
{
-if (!output.isInit || !IsSameShaped(&input, &output)) {
+if (!output.isInit || !IsSameShaped(input, output)) {
InitTensor(&output, &input);
}
......
......@@ -148,7 +148,7 @@ b = a * scale + shift
*/
void ScaleAndShift(const XTensor & a, XTensor & b, DTYPE scale, DTYPE shift)
{
-if (!b.isInit || !IsSameShaped(&a, &b)) {
+if (!b.isInit || !IsSameShaped(a, b)) {
InitTensor(&b, &a);
}
......
......@@ -78,7 +78,7 @@ void _funcName(const XTensor * a, XTensor * b)
_cudaFuncName(a, b); \
return; \
} \
-CheckNTErrors((IsSameShaped(a, b)), \
+CheckNTErrors((_IsSameShaped(a, b)), \
"Input tensors should have the same type!"); \
if (a->dataType == X_INT) { \
int * d = (int*)a->data; \
......@@ -109,7 +109,7 @@ void _funcName(const XTensor * a, XTensor * b)
if (a->devID >= 0) { \
ShowNTErrors("No GPU devices support!") \
} \
-CheckNTErrors((IsSameShaped(a, b)), \
+CheckNTErrors((_IsSameShaped(a, b)), \
"Input tensors should have the same type!"); \
if (a->dataType == X_INT) { \
int * d = (int*)a->data; \
......@@ -161,7 +161,7 @@ XTensor funcName(const XTensor & a)
#define SIMPLE_UNARY_FUNCTION_VOID(funcName, _funcName, operationId) \
void funcName(const XTensor & a, XTensor & b) \
{ \
-if (!b.isInit || !IsSameShaped(&a, &b)) { \
+if (!b.isInit || !IsSameShaped(a, b)) { \
InitTensor(&b, &a); \
} \
_funcName(&a, &b); \
......
......@@ -155,7 +155,7 @@ void Kernel##funcName(T * a, T * b, int size) \
} \
void _Cuda##funcName(const XTensor * a, XTensor * b) \
{ \
-CheckNTErrors((IsSameShaped(a, b)), \
+CheckNTErrors((_IsSameShaped(a, b)), \
"Input tensors should have the same type!"); \
CheckNTErrors(a->isSparse == false, "TODO!"); \
\
......
......@@ -39,7 +39,7 @@ in the k-th grid
*/
void _CopyInGrid(const XTensor * s, XTensor * t, int * index, int blockDim, int blockNumInGrid, bool isIndexOnDev)
{
-CheckNTErrors((IsSameShaped(s, t)), "Unmatched tensors!");
+CheckNTErrors((_IsSameShaped(s, t)), "Unmatched tensors!");
int blockDimRDI = s->order - blockDim - 1;
int blockSize = 1;
......
......@@ -52,7 +52,7 @@ void _ReduceSum(const XTensor * input, XTensor * output, int dim, const XTensor
CheckNTErrors((input->order == output->order + 1), "Incorrect tensor sizes!");
CheckNTErrors((input->order > dim && dim >=0), "Illegal dimension to reduce!");
CheckNTErrors((input->dataType == output->dataType), "Unmatched data types!");
-CheckNTErrors((shift == NULL || IsSameShaped(output, shift)), "Incorrect shift tensor size!");
+CheckNTErrors((shift == NULL || _IsSameShaped(output, shift)), "Incorrect shift tensor size!");
int dimRDI = input->order - dim - 1;
CheckNTErrors(dimRDI >= 0, "Wrong dimension!");
......
......@@ -45,7 +45,7 @@ void _Concatenate(const TensorList * smalls, XTensor * big, int dim)
XTensor * a = (XTensor*)smalls->GetItem(i - 1);
XTensor * b = (XTensor*)smalls->GetItem(i);
CheckNTErrors((a && b), "Empty input tensors!");
-if (!IsSameShaped(a, b))
+if (!_IsSameShaped(a, b))
uniform = false;
}
......@@ -77,7 +77,7 @@ XTensor Concatenate(const TensorList &smalls, int dim)
XTensor * a = (XTensor*)smalls.GetItem(i - 1);
XTensor * b = (XTensor*)smalls.GetItem(i);
CheckNTErrors((a && b), "Empty input tensors!");
-if (!IsSameShaped(a, b))
+if (!_IsSameShaped(a, b))
uniform = false;
}
XTensor * tensor = (XTensor*)smalls.GetItem(0);
......@@ -190,7 +190,7 @@ void Concatenate(const TensorList & smalls, XTensor & big, int dim)
XTensor * a = (XTensor*)smalls.GetItem(i - 1);
XTensor * b = (XTensor*)smalls.GetItem(i);
CheckNTErrors((a && b), "Empty input tensors!");
-if (!IsSameShaped(a, b))
+if (!_IsSameShaped(a, b))
uniform = false;
}
......@@ -291,7 +291,7 @@ XTensor Concatenate(const XTensor &smallA, const XTensor &smallB, int dim)
XTensor * a = (XTensor*)smalls.Get(i - 1);
XTensor * b = (XTensor*)smalls.Get(i);
CheckNTErrors((a && b), "Empty input tensors!");
-if (!IsSameShaped(a, b))
+if (!_IsSameShaped(a, b))
uniform = false;
}
XTensor * tensor = (XTensor*)smalls.Get(0);
......
......@@ -30,7 +30,7 @@ check whether the two matrices are in the same type and size
>> b - another tensor to compare with
<< return - whether the two input tensors are identical
*/
-bool IsSameShaped(const XTensor * a, const XTensor * b)
+bool _IsSameShaped(const XTensor * a, const XTensor * b)
{
if(a == NULL || b == NULL)
return false;
......@@ -56,15 +56,38 @@ bool IsSameShaped(const XTensor * a, const XTensor * b)
}
+/*
+check whether the two matrices are in the same type and size
+>> a - input tensor
+>> b - another tensor to compare with
+<< return - whether the two input tensors are identical
+*/
+bool IsSameShaped(const XTensor & a, const XTensor & b)
+{
+    return _IsSameShaped(&a, &b);
+}
/*
check whether the three matrices are in the same type and size
>> a - input tensor
>> b - another tensor to compare with
>> c - a third tensor to compare with
<< return - whether the three input tensors are identical
*/
-bool IsSameShaped(const XTensor * a, const XTensor * b, const XTensor * c)
+bool _IsSameShaped(const XTensor * a, const XTensor * b, const XTensor * c)
{
-return IsSameShaped(a, b) && IsSameShaped(a, c);
+return _IsSameShaped(a, b) && _IsSameShaped(a, c);
}
+/*
+check whether the three matrices are in the same type and size
+>> a - input tensor
+>> b - another tensor to compare with
+>> c - a third tensor to compare with
+<< return - whether the three input tensors are identical
+*/
+bool IsSameShaped(const XTensor & a, const XTensor & b, const XTensor & c)
+{
+    return _IsSameShaped(&a, &b, &c);
+}
} // namespace nts(NiuTrans.Tensor)
\ No newline at end of file
......@@ -27,10 +27,16 @@
namespace nts { // namespace nts(NiuTrans.Tensor)
/* judge whether the two matrices are in the same type and size */
-bool IsSameShaped(const XTensor * a, const XTensor * b);
+bool _IsSameShaped(const XTensor * a, const XTensor * b);
+/* judge whether the two matrices are in the same type and size */
+bool IsSameShaped(const XTensor & a, const XTensor & b);
+/* judge whether the three matrices are in the same type and size */
+bool _IsSameShaped(const XTensor * a, const XTensor * b, const XTensor * c);
/* judge whether the three matrices are in the same type and size */
-bool IsSameShaped(const XTensor * a, const XTensor * b, const XTensor * c);
+bool IsSameShaped(const XTensor & a, const XTensor & b, const XTensor & c);
} // namespace nts(NiuTrans.Tensor)
......
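For completeness, a brief usage sketch of the declarations above, covering the three-tensor variants; the tensor setup is illustrative only:

XTensor x, y, z;
InitTensor2D(&x, 4, 5, X_FLOAT);   // illustrative 4x5 float tensors
InitTensor2D(&y, 4, 5, X_FLOAT);
InitTensor2D(&z, 4, 5, X_FLOAT);

CheckNTErrors(_IsSameShaped(&x, &y, &z), "Unmatched tensors!");   // pointer form
CheckNTErrors(IsSameShaped(x, y, z), "Unmatched tensors!");       // reference form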
......@@ -434,7 +434,7 @@ merge two tensors into a big tensor (return an XTensor structure)
*/
XTensor Merge(const XTensor &smallA, const XTensor &smallB, int whereToMerge)
{
-CheckNTErrors(IsSameShaped(&smallA, &smallB),
+CheckNTErrors(IsSameShaped(smallA, smallB),
"The two tensors must be of the same size!");
int order = smallA.order;
......
......@@ -53,7 +53,7 @@ XTensor Reshape(XTensor &s, int order, int * dimSize)
void Reshape(XTensor &s, XTensor &t, int order, int * dimSize)
{
-if (!t.isInit || !IsSameShaped(&t, &s)) {
+if (!t.isInit || !IsSameShaped(t, s)) {
InitTensor(&t, &s);
}
......
......@@ -38,7 +38,7 @@ void _Squeeze(XTensor * source, XTensor * target, int leadingDim)
{
int order = target->order;
-CheckNTErrors(IsSameShaped(source, target),
+CheckNTErrors(_IsSameShaped(source, target),
"The source and target tensor must be of the same size!");
CheckNTErrors(leadingDim >= -1 && leadingDim < order,
"Wrong leading dimension");
......@@ -131,7 +131,7 @@ XTensor Squeeze(XTensor & source, int leadingDim)
void Squeeze(XTensor & source, XTensor & target, int leadingDim)
{
-if (!target.isInit || !IsSameShaped(&source, &target)) {
+if (!target.isInit || !IsSameShaped(source, target)) {
InitTensor(&target, &source);
}
......
......@@ -41,7 +41,7 @@ void _Sort(const XTensor * a, XTensor * b, XTensor * index, int dim)
{
dim = MODX(dim, a->order);
CheckNTErrors((IsSameShaped(a, b)), "Input tensors should have the same type!");
CheckNTErrors((_IsSameShaped(a, b)), "Input tensors should have the same type!");
CheckNTErrors((dim >= 0 && dim < a->order), "Incorrect dimension specified!");
CheckNTErrors((a->order == index->order), "Unmatched input tensors!");
CheckNTErrors((index->dataType == X_INT), "Wrong data type!");
......
......@@ -37,7 +37,7 @@ y = 1 if x > 1
*/
void _HardTanH(const XTensor * x, XTensor * y)
{
-CheckNTErrors(IsSameShaped(x, y),
+CheckNTErrors(_IsSameShaped(x, y),
"The input tensor and output tensor must have the same shape!")
#ifdef USE_CUDA
......@@ -88,7 +88,7 @@ XTensor HardTanH(const XTensor &x)
void HardTanH(const XTensor &x, XTensor &y)
{
-if (!y.isInit || !IsSameShaped(&y, &x)) {
+if (!y.isInit || !IsSameShaped(y, x)) {
InitTensor(&y, &x);
}
......
......@@ -34,7 +34,7 @@ identity function y = x
*/
void _Identity(const XTensor * x, XTensor * y)
{
-CheckNTErrors(IsSameShaped(x, y),
+CheckNTErrors(_IsSameShaped(x, y),
"The input tensor and output tensor must have the same shape!")
_CopyValues(x, y);
}
......@@ -64,7 +64,7 @@ XTensor Identity(const XTensor &x)
void Identity(const XTensor &x, XTensor &y)
{
-if (!y.isInit || !IsSameShaped(&y, &x)) {
+if (!y.isInit || !IsSameShaped(y, x)) {
InitTensor(&y, &x);
}
......
......@@ -211,7 +211,7 @@ void LogSoftmax(const XTensor &x, XTensor &y, int leadDim)
if (ld < 0)
ld = x.order - 1;
-if (!y.isInit || !IsSameShaped(&y, &x)) {
+if (!y.isInit || !IsSameShaped(y, x)) {
InitTensor(&y, &x);
}
......@@ -353,7 +353,7 @@ void _LogSoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
}
}
else {
-CheckNTErrors((IsSameShaped(gold, y)), "The tensors must be of the same size!");
+CheckNTErrors((_IsSameShaped(gold, y)), "The tensors must be of the same size!");
for (int k = 0; k < blockNum; k++) {
gp = (DTYPE*)gold->data + k * blockSize;
op = (DTYPE*)y->data + k * blockSize;
......@@ -407,7 +407,7 @@ void _LogSoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
}
}
else {
-CheckNTErrors((IsSameShaped(gold, y)), "The tensors must be of the same size!");
+CheckNTErrors((_IsSameShaped(gold, y)), "The tensors must be of the same size!");
for (int k = 0; k < blockNum; k++) {
gp = (DTYPE*)gold->data + k * blockSize;
op = (DTYPE*)y->data + k * blockSize;
......
......@@ -431,7 +431,7 @@ void _CudaLogSoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
dedx->dimSize[0], dedx->dimSize[1], gold->unitNumNonZero, lossName);
}
else {
-CheckNTErrors((IsSameShaped(gold, y)), "The tensors must be of the same size!");
+CheckNTErrors((_IsSameShaped(gold, y)), "The tensors must be of the same size!");
for (int k = 0; k < blockNum; k++) {
GDevs.GetCudaThread(x->devID, blockSize, cudaGridSize, cudaBlockSize);
......
......@@ -49,7 +49,7 @@ DTYPE _LossCompute(XTensor * gold, XTensor * output, LOSS_FUNCTION_NAME LFName,
DTYPE error = 0.0F;
if (output->devID < 0) {
CheckNTErrors((gLen >= 0 && gLen <= output->unitNum), "Illegal input length!");
-CheckNTErrors((IsSameShaped(gold, output)), "The input tensors must be of the same size!");
+CheckNTErrors((_IsSameShaped(gold, output)), "The input tensors must be of the same size!");
CheckNTErrors((gold->dimSizeRDI[0] == 1 && output->dimSizeRDI[0] == 1), "TODO!");
CheckNTErrors((gold->order > leadDim && leadDim >= 0), "Illegal leading dimension!");
CheckNTErrors((gold->dataType == DEFAULT_DTYPE && output->dataType == DEFAULT_DTYPE), "TODO!");
......@@ -206,7 +206,7 @@ DTYPE _LossComputeForLogScale(XTensor * gold, XTensor * output,
int leadDim, int gBeg, int gLen, int oBeg)
{
CheckNTErrors(gLen >= 0 && gLen <= output->unitNum, "Illegal input length!");
-CheckNTErrors(IsSameShaped(gold, output), "The input tensors must be of the same size!");
+CheckNTErrors(_IsSameShaped(gold, output), "The input tensors must be of the same size!");
CheckNTErrors(gold->dimSizeRDI[0] == 1 && output->dimSizeRDI[0] == 1, "TODO!");
CheckNTErrors(gold->order > leadDim && leadDim >= 0, "Illegal leading dimension!");
CheckNTErrors(gold->dataType == DEFAULT_DTYPE && output->dataType == DEFAULT_DTYPE, "TODO!");
......@@ -402,7 +402,7 @@ void _LossBackward(XTensor * dedy, XTensor * t, XTensor * y,
if (y->devID < 0) {
CheckNTErrors(tLen <= y->unitNum, "Illegal input length!");
-CheckNTErrors(IsSameShaped(t, y)&& IsSameShaped(dedy, y),
+CheckNTErrors(_IsSameShaped(t, y)&& _IsSameShaped(dedy, y),
"The input tensors must be of the same size!");
CheckNTErrors((dedy->devID == t->devID) && (dedy->devID == y->devID),
"Tensor must be on the same device!");
......
......@@ -55,7 +55,7 @@ DTYPE _CudaLossCompute(XTensor * gold, XTensor * y, LOSS_FUNCTION_NAME LFName,
bool isLogOutput, int leadDim, int gBeg, int gLen, int yBeg)
{
CheckNTErrors((gLen >= 0 && gLen <= y->unitNum), "Illegal input length!");
-CheckNTErrors((IsSameShaped(gold, y)), "The input tensors must be of the same size!");
+CheckNTErrors((_IsSameShaped(gold, y)), "The input tensors must be of the same size!");
CheckNTErrors((gold->dimSizeRDI[0] == 1 && y->dimSizeRDI[0] == 1), "TODO!");
CheckNTErrors((gold->order > leadDim && leadDim >= 0), "Illegal leading dimension!");
CheckNTErrors((gold->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE), "TODO!");
......@@ -332,7 +332,7 @@ void _CudaLossBackward(XTensor * dedy, XTensor * t, XTensor * y,
int leadDim, int tBeg, int tLen, int yBeg)
{
CheckNTErrors((tLen <= y->unitNum), "Illegal input length!");
-CheckNTErrors((IsSameShaped(t, y)&& IsSameShaped(dedy, y)),
+CheckNTErrors((_IsSameShaped(t, y)&& _IsSameShaped(dedy, y)),
"The input tensors must be of the same size!");
CheckNTErrors(((dedy->devID == t->devID) && (dedy->devID == y->devID)),
"Tensor must be on the same device!");
......
......@@ -33,7 +33,7 @@ rectify function y = max(0, x)
*/
void _Rectify(const XTensor * x, XTensor * y)
{
-CheckNTErrors(IsSameShaped(x, y),
+CheckNTErrors(_IsSameShaped(x, y),
"The input tensor and output tensor must have the same shape!")
#ifdef USE_CUDA
......@@ -80,7 +80,7 @@ XTensor Rectify(const XTensor &x)
void Rectify(const XTensor &x, XTensor &y)
{
-if (!y.isInit || !IsSameShaped(&y, &x)) {
+if (!y.isInit || !IsSameShaped(y, x)) {
InitTensor(&y, &x);
}
......
......@@ -35,7 +35,7 @@ sigmoid function y = 1/(1+exp(-x))
*/
void _Sigmoid(const XTensor * x, XTensor * y)
{
-CheckNTErrors(IsSameShaped(x, y),
+CheckNTErrors(_IsSameShaped(x, y),
"The input tensor and output tensor must have the same shape!")
#ifdef USE_CUDA
......@@ -83,7 +83,7 @@ XTensor Sigmoid(const XTensor &x)
void Sigmoid(const XTensor &x, XTensor &y)
{
-if (!y.isInit || !IsSameShaped(&y, &x)) {
+if (!y.isInit || !IsSameShaped(y, x)) {
InitTensor(&y, &x);
}
......
......@@ -157,7 +157,7 @@ void Softmax(const XTensor &x, XTensor &y, int leadDim)
if (ld < 0)
ld = x.order - 1;
-if (!y.isInit || !IsSameShaped(&y, &x)) {
+if (!y.isInit || !IsSameShaped(y, x)) {
InitTensor(&y, &x);
}
......@@ -253,7 +253,7 @@ void _SoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
}
}
else{
-CheckNTErrors((IsSameShaped(gold, y)), "The tensors must be of the same size!");
+CheckNTErrors((_IsSameShaped(gold, y)), "The tensors must be of the same size!");
for(int k = 0; k < blockNum; k++){
gp = (DTYPE*)gold->data + k * blockSize;
op = (DTYPE*)y->data + k * blockSize;
......@@ -292,7 +292,7 @@ void _SoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
}
}
else{
-CheckNTErrors((IsSameShaped(gold, y)), "The tensors must be of the same size!");
+CheckNTErrors((_IsSameShaped(gold, y)), "The tensors must be of the same size!");
for(int k = 0; k < blockNum; k++){
gp = (DTYPE*)gold->data + k * blockSize;
op = (DTYPE*)y->data + k * blockSize;
......
......@@ -224,7 +224,7 @@ void _CudaSoftmaxSumMax(const XTensor * x, XTensor * y, int leadDim, XTensor * s
{
CheckNTErrors((x->devID >= 0), "Forward computation of softmax must be run on GPUs.");
CheckNTErrors((x->devID == y->devID), "Tensors used in softmax are not on the same GPU.");
-CheckNTErrors((IsSameShaped(x, y)), "Input tensors must be of the same size!");
+CheckNTErrors((_IsSameShaped(x, y)), "Input tensors must be of the same size!");
int leadDimRDI = y->order - leadDim - 1;
int dimensionSize = y->dimSizeRDI[leadDimRDI];
......
......@@ -55,10 +55,10 @@ void _CrossEntropy(const XTensor * output, const XTensor * gold,
int unitNum = output->dimSize[n];
CheckNTErrors(n >= 0 && n < output->order, "Wrong leadingDim!");
-CheckNTErrors(IsSameShaped(output, gold),
+CheckNTErrors(_IsSameShaped(output, gold),
"The output tensor and gold tensor must be of the same size!");
CheckNTErrors(weight == NULL || weight->unitNum == unitNum, "Wrong weight tensor!");
-CheckNTErrors(padding == NULL || IsSameShaped(padding, loss),
+CheckNTErrors(padding == NULL || _IsSameShaped(padding, loss),
"The loss tensor and padding tensor must be same shape!");
CheckNTErrors(loss->order == output->order - 1, "Wrong loss dimension!");
CheckNTErrors(gold->dataType == DEFAULT_DTYPE && output->dataType == DEFAULT_DTYPE, "TODO!");
......@@ -102,11 +102,11 @@ void _CrossEntropyFast(const XTensor * output, const XTensor * gold,
CheckNTErrors(n >= 0 && n < output->order,
"Wrong leading dimension!");
-CheckNTErrors(IsSameShaped(output, gold),
+CheckNTErrors(_IsSameShaped(output, gold),
"The output tensor and gold tensor must be of the same size!");
CheckNTErrors(weight == NULL || weight->unitNum == leadingDimSize,
"Wrong weight tensor!");
-CheckNTErrors(padding == NULL || IsSameShaped(padding, loss),
+CheckNTErrors(padding == NULL || _IsSameShaped(padding, loss),
"The loss tensor and padding tensor must be same shape!");
CheckNTErrors(loss->order == output->order - 1,
"Wrong loss dimension!");
......@@ -338,7 +338,7 @@ DTYPE _CrossEntropy(const XTensor * output, const XTensor * gold,
int unitNum = output->dimSize[n];
CheckNTErrors(n >= 0 && n < output->order, "Wrong leadingDim!");
-CheckNTErrors(IsSameShaped(output, gold),
+CheckNTErrors(_IsSameShaped(output, gold),
"The output tensor and gold tensor must be of the same size!");
CheckNTErrors(weight == NULL || weight->unitNum == unitNum, "Wrong weight tensor!");
CheckNTErrors(padding == NULL || padding->order == output->order - 1,
......@@ -413,7 +413,7 @@ DTYPE _CrossEntropyFast(const XTensor * output, const XTensor * gold,
CheckNTErrors(n >= 0 && n < output->order,
"Wrong leadingDim!");
-CheckNTErrors(IsSameShaped(output, gold),
+CheckNTErrors(_IsSameShaped(output, gold),
"The output tensor and gold tensor must be of the same size!");
CheckNTErrors(weight == NULL || weight->unitNum == leadingDimSize,
"Wrong weight tensor!");
......@@ -565,7 +565,7 @@ void _CrossEntropyBackward(XTensor * dedy, const XTensor * output,
CheckNTErrors(n >= 0 && n < output->order,
"Wrong leading dimension!");
-CheckNTErrors(IsSameShaped(dedy, output, gold),
+CheckNTErrors(_IsSameShaped(dedy, output, gold),
"The output tensor and gold tensor must be of the same size!");
CheckNTErrors(weight == NULL || weight->unitNum == leadingDimSize,
"Wrong weight tensor!");
......
......@@ -101,7 +101,7 @@ DTYPE _CudaCrossEntropyFast(const XTensor * output, const XTensor * gold,
CheckNTErrors(n >= 0 && n < output->order,
"Wrong leadingDim!");
-CheckNTErrors(IsSameShaped(output, gold),
+CheckNTErrors(_IsSameShaped(output, gold),
"The output tensor and gold tensor must be of the same size!");
CheckNTErrors(weight == NULL || weight->unitNum == leadingDimSize,
"Wrong weight tensor!");
......