Commit fe1a0bf7 by xiaotong

code for tensor order = -1 (uninitialized)

parent 4e2bc4cf
......@@ -129,7 +129,6 @@ void XNodeGrad::ComputeMath(XTensor * node)
CheckNTErrors(income.tailNum == 2, "Wrong input tensor number for MULTIPLY!");
XTensor * a = income.tails[0];
XTensor * b = income.tails[1];
int leadDim = income.GetParamInt(0);
XNoder::MakeGrad(a);
XNoder::MakeGrad(b);
......@@ -142,4 +141,4 @@ void XNodeGrad::ComputeMath(XTensor * node)
}
}
}
\ No newline at end of file
}
......@@ -134,8 +134,6 @@ constructor
XTensor::XTensor(const int myOrder, const int * myDimSize, const TENSOR_DATA_TYPE myDataType,
const float myDenseRatio, int myDevID, XMem * myMem)
{
CheckNTErrors((myOrder > 0), "Illegal tensor order1");
Init();
SetDataPointer();
......@@ -144,7 +142,8 @@ XTensor::XTensor(const int myOrder, const int * myDimSize, const TENSOR_DATA_TYP
mem = myMem;
devID = myMem != NULL ? myMem->devID : myDevID;
Resize(myOrder, myDimSize, myDataType, myDenseRatio);
if(order >= 0)
Resize(myOrder, myDimSize, myDataType, myDenseRatio);
}
/* copy constructor */
......@@ -1999,10 +1998,12 @@ generate a copy of XTensor
*/
XTensor * NewTensor(XTensor * a, bool isFilledData)
{
int dims[MAX_TENSOR_DIM_NUM];
CheckNTErrors((a != NULL), "Empty input!");
int * dims = new int[a->order];
memcpy(dims, a->dimSize, sizeof(int) * a->order);
if(a->order > 0)
memcpy(dims, a->dimSize, sizeof(int) * a->order);
if(!isFilledData)
dims[0] = -dims[0];
......@@ -2011,8 +2012,6 @@ XTensor * NewTensor(XTensor * a, bool isFilledData)
a->dataType, a->denseRatio,
a->devID, a->mem);
delete[] dims;
return newTensor;
}
......
......@@ -204,12 +204,11 @@ DTYPE LossComputeForLogScale(XTensor * gold, XTensor * output,
LOSS_FUNCTION_NAME LFName,
int leadDim, int gBeg, int gLen, int oBeg)
{
CheckNTErrors((gLen >= 0 && gLen <= output->unitNum), "Illegal input length!");
CheckNTErrors((XTensor::IsIdentical(gold, output)), "The input tensors must be of the same size!");
CheckNTErrors((gold->dimSizeRDI[0] == 1 && output->dimSizeRDI[0] == 1), "TODO!");
CheckNTErrors((gold->order > leadDim && leadDim >= 0), "Illegal leading dimension!");
CheckNTErrors((gold->dataType == DEFAULT_DTYPE && output->dataType == DEFAULT_DTYPE),
"TODO!");
CheckNTErrors(gLen >= 0 && gLen <= output->unitNum, "Illegal input length!");
CheckNTErrors(XTensor::IsIdentical(gold, output), "The input tensors must be of the same size!");
CheckNTErrors(gold->dimSizeRDI[0] == 1 && output->dimSizeRDI[0] == 1, "TODO!");
CheckNTErrors(gold->order > leadDim && leadDim >= 0, "Illegal leading dimension!");
CheckNTErrors(gold->dataType == DEFAULT_DTYPE && output->dataType == DEFAULT_DTYPE, "TODO!");
int leadDimRDI = output->order - leadDim - 1;
int dimensionSize = output->dimSizeRDI[leadDimRDI];
......@@ -384,14 +383,16 @@ void LossBackward(XTensor * dedy, XTensor * t, XTensor * y,
LOSS_FUNCTION_NAME LFName,
int leadDim, int tBeg, int tLen, int yBeg)
{
if(t->order < 0)
return;
if (y->devID < 0) {
CheckNTErrors((tLen <= y->unitNum), "Illegal input length!");
CheckNTErrors((XTensor::IsIdentical(t, y)&& XTensor::IsIdentical(dedy, y)),
CheckNTErrors(tLen <= y->unitNum, "Illegal input length!");
CheckNTErrors(XTensor::IsIdentical(t, y)&& XTensor::IsIdentical(dedy, y),
"The input tensors must be of the same size!");
CheckNTErrors(((dedy->devID == t->devID) && (dedy->devID == y->devID)), "Tensor must be on the same device!");
CheckNTErrors((t->order > leadDim), "Illegal leading dimension!");
CheckNTErrors((t->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE),
"TODO!");
CheckNTErrors((dedy->devID == t->devID) && (dedy->devID == y->devID), "Tensor must be on the same device!");
CheckNTErrors(t->order > leadDim, "Illegal leading dimension!");
CheckNTErrors(t->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE, "TODO!");
int leadDimRDI = leadDim >= 0 ? y->order - leadDim - 1 : -1;
if(leadDimRDI < 0){
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论