Commit ad3fc86f by xiaotong

rename SetTMP

parent 22df3e17
......@@ -202,7 +202,7 @@ XTensor::~XTensor()
dims[0] = -dims[0];
XTensor * newTensor = new XTensor(order, dims, dataType, denseRatio, devID, mem);
newTensor->SetTMP();
newTensor->SetTMPFlag();
newTensor->data = data;
data = NULL;
......@@ -244,6 +244,7 @@ void XTensor::Init()
isInit = false;
isTmp = false;
isGrad = false;
isVar = false;
visitMark = 0;
grad = NULL;
}
......@@ -297,7 +298,7 @@ XTensor& XTensor::operator= (const XTensor& tensor)
dims[0] = -dims[0];
XTensor * newTensor = new XTensor(order, dims, dataType, denseRatio, devID, mem);
newTensor->SetTMP();
newTensor->SetTMPFlag();
newTensor->data = data;
newTensor->dataHost = dataHost;
newTensor->signature = tensor.signature;
......@@ -1125,7 +1126,7 @@ int XTensor::GetNonzeroSize()
set the tensor as "temporary"
>> myIsTMP - the flag
*/
void XTensor::SetTMP(bool myIsTmp)
void XTensor::SetTMPFlag(bool myIsTmp)
{
isTmp = myIsTmp;
}
......@@ -1134,12 +1135,23 @@ void XTensor::SetTMP(bool myIsTmp)
set the tensor as "keep-gradient"
>> myIsGrad - the flag
*/
void XTensor::SetGrad(bool myIsGrad)
void XTensor::SetGradFlag(bool myIsGrad)
{
isGrad = myIsGrad;
}
/*
set the tensor as "variable"
>> myIsVar - the flag
*/
void XTensor::SetVarFlag(bool myIsVar)
{
    /* a variable (e.g., a model parameter) must also keep its gradient */
    if (myIsVar)
        SetGradFlag(true);
    isVar = myIsVar;
}
/*
resize a tensor with a specified tensor size
>> myOrder - order of the tensor
>> myDimSize - the size of each dimension
......
......@@ -145,6 +145,9 @@ public:
/* indicates whether the tensor keeps the gradient when used as model parameters */
bool isGrad;
/* indicates whether the tensor is used as parameters (or variables) */
bool isVar;
/* mark for traversing the graph */
unsigned int visitMark;
......@@ -319,10 +322,13 @@ public:
int GetNonzeroSize();
/* set the tensor as "temporary" */
void SetTMP(bool myIsTmp = true);
void SetTMPFlag(bool myIsTmp = true);
/* set the tensor as "keep-gradient" */
void SetGrad(bool myIsGrad = true);
void SetGradFlag(bool myIsGrad = true);
/* set the tensor as "variable" */
void SetVarFlag(bool myIsVar = true);
/* resize a matrix with a specified matrix size */
bool Resize(const int myOrder, const int * myDimSize,
......
......@@ -181,7 +181,7 @@ where i is the index of the item
XTensor Div(const XTensor &a, const XTensor &b, DTYPE alpha, int leadingDim)
{
XTensor c(&a);
c.SetTMP();
c.SetTMPFlag();
int n = GetDivDimIndex(a, b);
......
......@@ -150,7 +150,7 @@ i.e., a is divided with b by broadcasting
XTensor DivDim(const XTensor &a, const XTensor &b, int n, DTYPE alpha)
{
XTensor c(&a);
c.SetTMP();
c.SetTMPFlag();
/* call _Div function */
_DivDim(&a, &b, &c, n, alpha);
......
......@@ -249,7 +249,7 @@ XTensor MatrixMul(const XTensor &a, MATRIX_TRANS_TYPE transposedA,
float dr = (!a.isSparse || !b.isSparse) ? 1.0F : MAX(a.denseRatio, b.denseRatio);
XTensor c(order, dimSize, a.dataType, dr, a.devID, a.mem);
c.SetTMP();
c.SetTMPFlag();
/* call _MatrixMul function */
_MatrixMul(&a, transposedA, &b, transposedB, &c, alpha, 0, parallelRunner);
......@@ -299,7 +299,7 @@ XTensor MatrixMul(const XTensor &a, const XTensor &b,
float dr = (!a.isSparse || !b.isSparse) ? 1.0F : MAX(a.denseRatio, b.denseRatio);
XTensor c(order, dimSize, a.dataType, dr, a.devID, a.mem);
c.SetTMP();
c.SetTMPFlag();
/* call _MatrixMul function */
_MatrixMul(&a, X_NOTRANS, &b, X_NOTRANS, &c, alpha, 0, parallelRunner);
......
......@@ -314,7 +314,7 @@ XTensor MatrixMulBatched(const XTensor &a, MATRIX_TRANS_TYPE transposedA, const
float dr = (!a.isSparse || !b.isSparse) ? 1.0F : MAX(a.denseRatio, b.denseRatio);
XTensor c(order, dimSize, a.dataType, dr, a.devID, a.mem);
c.SetTMP();
c.SetTMPFlag();
/*call _MatrixMulBatched function */
_MatrixMulBatched(&a, transposedA, &b, transposedB, &c, alpha, 0, parallelRunner);
......@@ -370,7 +370,7 @@ XTensor MatrixMulBatched(const XTensor &a, const XTensor &b,
float dr = (!a.isSparse || !b.isSparse) ? 1.0F : MAX(a.denseRatio, b.denseRatio);
XTensor c(order, dimSize, a.dataType, dr, a.devID, a.mem);
c.SetTMP();
c.SetTMPFlag();
/*call _MatrixMulBatched function */
_MatrixMulBatched(&a, X_NOTRANS, &b, X_NOTRANS, &c, alpha, 0, parallelRunner);
......
......@@ -182,7 +182,7 @@ XTensor Multiply(const XTensor &a, const XTensor &b, DTYPE alpha, int leadingDim
{
XTensor c(&a);
c.SetTMP();
c.SetTMPFlag();
int n = GetMultiplyDimIndex(a, b);
......
......@@ -148,7 +148,7 @@ i.e., a is multiplied with b by broadcasting
XTensor MultiplyDim(const XTensor &a, const XTensor &b, int n, DTYPE alpha)
{
XTensor c(&a);
c.SetTMP();
c.SetTMPFlag();
/* call _Multiply function */
_MultiplyDim(&a, &b, &c, n, alpha);
......
......@@ -68,7 +68,7 @@ make a new tensor to keep the result and return it
XTensor Negate(const XTensor & a)
{
XTensor b(&a);
b.SetTMP();
b.SetTMPFlag();
/* call _Negate function */
_Negate(&a, &b);
......
......@@ -74,7 +74,7 @@ make a new tensor to keep the result and return it
XTensor Sign(const XTensor & a)
{
XTensor b(&a);
b.SetTMP();
b.SetTMPFlag();
/* call _Sign function */
_Sign(&a, &b);
......
......@@ -164,7 +164,7 @@ make a new tensor c to keep the result and return it
XTensor Sub(const XTensor &a, const XTensor &b, DTYPE beta)
{
XTensor c(&a);
c.SetTMP();
c.SetTMPFlag();
int n = GetSubDimIndex(a, b);
......
......@@ -150,7 +150,7 @@ i.e., a is subtracted with b by broadcasting
XTensor SubDim(const XTensor &a, const XTensor &b, int n, DTYPE beta)
{
XTensor c(&a);
c.SetTMP();
c.SetTMPFlag();
/* call _Sub function */
_SubDim(&a, &b, &c, n, beta);
......
......@@ -169,7 +169,7 @@ make a new tensor c to keep the result and return it
XTensor Sum(const XTensor &a, const XTensor &b, DTYPE beta)
{
XTensor c(&a);
c.SetTMP();
c.SetTMPFlag();
int n = GetSumDimIndex(a, b);
......
......@@ -150,7 +150,7 @@ i.e., a is summed with b by broadcasting
XTensor SumDim(const XTensor &a, const XTensor &b, int n, DTYPE beta)
{
XTensor c(&a);
c.SetTMP();
c.SetTMPFlag();
/* call _Sum function */
_SumDim(&a, &b, &c, n, beta);
......
......@@ -111,7 +111,7 @@ XTensor SelectRange(const XTensor &a, int dim, int low, int high)
float dr = (!a.isSparse) ? 1.0F : a.denseRatio;
XTensor c(order, dimSize, a.dataType, dr, a.devID, a.mem);
c.SetTMP();
c.SetTMPFlag();
/* call _SelectRange function */
_SelectRange(&a, &c, dim, low, high);
......
......@@ -81,7 +81,7 @@ make a new tensor to keep the result and return it
XTensor Clip(const XTensor & a, DTYPE lower, DTYPE upper)
{
XTensor b(&a);
b.SetTMP();
b.SetTMPFlag();
/* call _Clip function */
_Clip(&a, &b, lower, upper);
......
......@@ -132,7 +132,7 @@ where a and b are the scalar and bias respectively, and \epsilon is the adjustme
XTensor Normalize(const XTensor &input, int dim, const XTensor &mean, const XTensor &var, const XTensor &a, const XTensor &b, DTYPE epsilon)
{
XTensor output(&input);
output.SetTMP();
output.SetTMPFlag();
/* call _Normalize function */
_Normalize(&input, &output, dim, &mean, &var, &a, &b, epsilon);
......
......@@ -90,7 +90,7 @@ make a new tensor to keep the result and return it
XTensor Power(const XTensor & a, DTYPE p)
{
XTensor b(&a);
b.SetTMP();
b.SetTMPFlag();
/* call _Power function */
_Power(&a, &b, p);
......
......@@ -105,7 +105,7 @@ b = a * scale + shift
XTensor ScaleAndShift(const XTensor &a, DTYPE scale, DTYPE shift)
{
XTensor b(&a);
b.SetTMP();
b.SetTMPFlag();
/* call _ScaleAndShift function */
_ScaleAndShift(&a, &b, scale, shift);
......
......@@ -65,7 +65,7 @@ void _funcNameMe(XTensor * a) \
XTensor funcName(const XTensor &a) \
{ \
XTensor b(&a); \
b.SetTMP(); \
b.SetTMPFlag(); \
_funcName(&a, &b); \
XLink::MakeLink(&a, NULL, &b, operationId); \
return b; \
......@@ -140,7 +140,7 @@ void _funcNameMe(XTensor * a) \
XTensor funcName(const XTensor &a) \
{ \
XTensor b(&a); \
b.SetTMP(); \
b.SetTMPFlag(); \
_funcName(&a, &b); \
XLink::MakeLink(&a, NULL, &b, operationId); \
return b; \
......
......@@ -130,7 +130,7 @@ XTensor CopyIndexed(const XTensor &s, int dim, int * srcIndex, int indexSize, in
float dr = (!s.isSparse) ? 1.0F : s.denseRatio;
XTensor t(order, dimSize, s.dataType, dr, s.devID, s.mem);
t.SetTMP();
t.SetTMPFlag();
/* call _CopyIndexed function */
_CopyIndexed(&s, &t, dim, srcIndex, indexSize, tgtIndex, copyNum);
......
......@@ -108,7 +108,7 @@ make a new tensor to keep the result and return it
XTensor CopyValues(const XTensor &s, XStream * stream)
{
XTensor t(&s);
t.SetTMP();
t.SetTMPFlag();
/* call _CopyValues function */
_CopyValues(&s, &t, stream);
......
......@@ -114,7 +114,7 @@ XTensor ReduceMax(const XTensor &input, int dim)
float dr = (!input.isSparse) ? 1.0F : input.denseRatio;
XTensor output(order, dimSize, input.dataType, dr, input.devID, input.mem);
output.SetTMP();
output.SetTMPFlag();
/* call _ReduceMax function */
_ReduceMax(&input, &output, dim);
......
......@@ -71,7 +71,7 @@ XTensor ReduceMean(const XTensor &input, int dim)
float dr = (!input.isSparse) ? 1.0F : input.denseRatio;
XTensor output(order, dimSize, input.dataType, dr, input.devID, input.mem);
output.SetTMP();
output.SetTMPFlag();
/* call _ReduceMean function */
_ReduceMean(&input, &output, dim);
......
......@@ -225,7 +225,7 @@ XTensor ReduceSum(const XTensor &input, int dim, const XTensor &shift, DTYPE pow
float dr = (!input.isSparse) ? 1.0F : input.denseRatio;
XTensor output(order, dimSize, input.dataType, dr, input.devID, input.mem);
output.SetTMP();
output.SetTMPFlag();
/* call _ReduceSum function */
_ReduceSum(&input, &output, dim, &shift, power, isExp);
......@@ -271,7 +271,7 @@ XTensor ReduceSum(const XTensor &input, int dim, DTYPE power, bool isExp)
float dr = (!input.isSparse) ? 1.0F : input.denseRatio;
XTensor output(order, dimSize, input.dataType, dr, input.devID, input.mem);
output.SetTMP();
output.SetTMPFlag();
/* call _ReduceSum function */
_ReduceSum(&input, &output, dim, NULL, power, isExp);
......
......@@ -67,7 +67,7 @@ XTensor ReduceSumSquared(const XTensor &input, int dim, const XTensor &shift)
float dr = (!input.isSparse) ? 1.0F : input.denseRatio;
XTensor output(order, dimSize, input.dataType, dr, input.devID, input.mem);
output.SetTMP();
output.SetTMPFlag();
/* call _ReduceSumSquared function */
_ReduceSumSquared(&input, &output, dim, &shift);
......
......@@ -70,7 +70,7 @@ XTensor ReduceVariance(const XTensor &input, int dim, const XTensor &mean)
float dr = (!input.isSparse) ? 1.0F : input.denseRatio;
XTensor output(order, dimSize, input.dataType, dr, input.devID, input.mem);
output.SetTMP();
output.SetTMPFlag();
/* call _ReduceVariance function */
_ReduceVariance(&input, &output, dim, &mean);
......
......@@ -93,7 +93,7 @@ XTensor Concatenate(const XList &smalls, int dim)
float dr = (!tensor->isSparse) ? 1.0F : tensor->denseRatio;
XTensor big(order, dimSize, tensor->dataType, dr, tensor->devID, tensor->mem);
big.SetTMP();
big.SetTMPFlag();
/* call _Merge function */
_Merge(&smalls, &big, dim);
......@@ -121,7 +121,7 @@ XTensor Concatenate(const XList &smalls, int dim)
float dr = (!tensor->isSparse) ? 1.0F : tensor->denseRatio;
XTensor big(order, dimSize, tensor->dataType, dr, tensor->devID, tensor->mem);
big.SetTMP();
big.SetTMPFlag();
/* call _ConcatenateSolely function */
_ConcatenateSolely(&smalls, &big, dim);
......@@ -194,7 +194,7 @@ XTensor Concatenate(const XTensor &smallA, const XTensor &smallB, int dim)
float dr = (!tensor->isSparse) ? 1.0F : tensor->denseRatio;
XTensor big(order, dimSize, tensor->dataType, dr, tensor->devID, tensor->mem);
big.SetTMP();
big.SetTMPFlag();
/* call _Merge function */
_Merge(&smalls, &big, dim);
......@@ -222,7 +222,7 @@ XTensor Concatenate(const XTensor &smallA, const XTensor &smallB, int dim)
float dr = (!tensor->isSparse) ? 1.0F : tensor->denseRatio;
XTensor big(order, dimSize, tensor->dataType, dr, tensor->devID, tensor->mem);
big.SetTMP();
big.SetTMPFlag();
/* call _ConcatenateSolely function */
_ConcatenateSolely(&smalls, &big, dim);
......
......@@ -183,7 +183,7 @@ XTensor Merge(const XTensor &s, int whereToMerge, int leadingDim)
float dr = (!s.isSparse) ? 1.0F : s.denseRatio;
XTensor t(order, dimSize, s.dataType, dr, s.devID, s.mem);
t.SetTMP();
t.SetTMPFlag();
/* call _Merge function */
_Merge(&s, &t, whereToMerge, leadingDim);
......@@ -334,7 +334,7 @@ XTensor Merge(const XList &smalls, int whereToMerge)
float dr = (!tensor->isSparse) ? 1.0F : tensor->denseRatio;
XTensor big(order, dimSize, tensor->dataType, dr, tensor->devID, tensor->mem);
big.SetTMP();
big.SetTMPFlag();
/* call _Merge function */
_Merge(&smalls, &big, whereToMerge);
......@@ -371,7 +371,7 @@ XTensor Merge(const XTensor &smallA, const XTensor &smallB, int whereToMerge)
float dr = (!smallA.isSparse) ? 1.0F : smallA.denseRatio;
XTensor big(order, dimSize, smallA.dataType, dr, smallA.devID, smallA.mem);
big.SetTMP();
big.SetTMPFlag();
XList smalls(2);
smalls.Add(&smallA);
......
......@@ -184,7 +184,7 @@ XTensor Split(const XTensor &s, int whereToSplit, int splitNum)
float dr = (!s.isSparse) ? 1.0F : s.denseRatio;
XTensor t(order, dimSize, s.dataType, dr, s.devID, s.mem);
t.SetTMP();
t.SetTMPFlag();
/* call _Split function */
_Split(&s, &t, whereToSplit, splitNum);
......
......@@ -138,7 +138,7 @@ XTensor Transpose(const XTensor &a, const int i, const int j)
float dr = (!a.isSparse) ? 1.0F : a.denseRatio;
XTensor b(order, dimSize, a.dataType, dr, a.devID, a.mem);
b.SetTMP();
b.SetTMPFlag();
/* call _Transpose function */
_Transpose(&a, &b, i, j);
......
......@@ -122,7 +122,7 @@ XTensor Unsqueeze(const XTensor &a, int dim, int dSize)
float dr = (!a.isSparse) ? 1.0F : a.denseRatio;
XTensor b(order, dimSize, a.dataType, dr, a.devID, a.mem);
b.SetTMP();
b.SetTMPFlag();
/* call _Unsqueeze function */
_Unsqueeze(&a, &b, dim, dSize);
......
......@@ -72,7 +72,7 @@ y = 1 if x > 1
XTensor HardTanH(const XTensor &x)
{
XTensor y(&x);
y.SetTMP();
y.SetTMPFlag();
/* call _HardTanH function */
_HardTanH(&x, &y);
......
......@@ -46,7 +46,7 @@ make a new tensor to keep the result and return it
XTensor Identity(const XTensor &x)
{
XTensor y(&x);
y.SetTMP();
y.SetTMPFlag();
/* call _Identity function */
_Identity(&x, &y);
......
......@@ -181,7 +181,7 @@ XTensor LogSoftmax(const XTensor &x, int leadDim)
ld = x.order - 1;
XTensor y(&x);
y.SetTMP();
y.SetTMPFlag();
/* call _LogSoftmax function */
_LogSoftmax(&x, &y, ld);
......
......@@ -65,7 +65,7 @@ make a new tensor to keep the result and return it
XTensor Rectify(const XTensor &x)
{
XTensor y(&x);
y.SetTMP();
y.SetTMPFlag();
/* call _Rectify function */
_Rectify(&x, &y);
......
......@@ -63,7 +63,7 @@ make a new tensor to keep the result and return it
XTensor Sigmoid(const XTensor &x)
{
XTensor y(&x);
y.SetTMP();
y.SetTMPFlag();
/* call _Sigmoid function */
_Sigmoid(&x, &y);
......
......@@ -136,7 +136,7 @@ XTensor Softmax(const XTensor &x, int leadDim)
ld = x.order - 1;
XTensor y(&x);
y.SetTMP();
y.SetTMPFlag();
/* call _Softmax function */
_Softmax(&x, &y, ld);
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论