Commit 65bb83d8 by liyinqiao

Roll back some code.

We found some bugs while training the NMT system, so we decided to roll this code back to the previous version. The rolled-back code still needs to be reviewed.
parent f9ea80d6
...@@ -975,7 +975,7 @@ void XMathGrad::GradPower(XTensor * node, bool isEfficient) ...@@ -975,7 +975,7 @@ void XMathGrad::GradPower(XTensor * node, bool isEfficient)
XTensor * tmp = NewTensorBufV2(a, a->devID, a->mem); XTensor * tmp = NewTensorBufV2(a, a->devID, a->mem);
_Power(a, tmp, p - 1.0F); _Power(a, tmp, p - 1.0F);
_ScaleMe(tmp, p); _ScaleAndShiftMe(tmp, p);
_Multiply(node->grad, tmp, a->grad, 1.0F); _Multiply(node->grad, tmp, a->grad, 1.0F);
DelTensorBuf(tmp); DelTensorBuf(tmp);
......
...@@ -127,14 +127,7 @@ public: ...@@ -127,14 +127,7 @@ public:
void ReadFromFile(FILE* fp, int num); void ReadFromFile(FILE* fp, int num);
/* short */ /* short */
T& operator[] (int i) { T& operator[] (int i) const { return GetItem(i); };
CheckNTErrors(i >= -count && i < count, "Index of a list item is out of scope!");
CheckNTErrors(count > 0, "Cannt index the item in an empty list!");
if (i < 0)
return items[count + i];
else
return items[i];
};
T& Get(int i) const { return GetItem(i); }; T& Get(int i) const { return GetItem(i); };
void Set(int i, T item) { SetItem(i, item); }; void Set(int i, T item) { SetItem(i, item); };
}; };
......
...@@ -92,10 +92,6 @@ XTensor funcName(const XTensor &a, DTYPE number) ...@@ -92,10 +92,6 @@ XTensor funcName(const XTensor &a, DTYPE number)
XTensor b(&a); \ XTensor b(&a); \
b.SetTMPFlag(); \ b.SetTMPFlag(); \
_funcName(&a, &b, number); \ _funcName(&a, &b, number); \
if (a.enableGrad) { \
XLink::MakeLink(&a, NULL, &b, operationId); \
XLink::AddParamToHead(&b, (DTYPE)number); \
} \
return b; \ return b; \
} }
...@@ -106,10 +102,6 @@ void funcName(const XTensor &a, XTensor &b, DTYPE number) ...@@ -106,10 +102,6 @@ void funcName(const XTensor &a, XTensor &b, DTYPE number)
InitTensorV2(&b, &a); \ InitTensorV2(&b, &a); \
} \ } \
_funcName(&a, &b, number); \ _funcName(&a, &b, number); \
if (a.enableGrad) { \
XLink::MakeLink(&a, NULL, &b, operationId); \
XLink::AddParamToHead(&b, (DTYPE)number); \
} \
} }
// I think we needn't to make link. // I think we needn't to make link.
...@@ -194,9 +186,6 @@ XTensor funcName(const XTensor & a, const XTensor & b) ...@@ -194,9 +186,6 @@ XTensor funcName(const XTensor & a, const XTensor & b)
XTensor c(&a); \ XTensor c(&a); \
c.SetTMPFlag(); \ c.SetTMPFlag(); \
_funcName(&a, &b, &c); \ _funcName(&a, &b, &c); \
if (a.enableGrad && b.enableGrad) { \
XLink::MakeLink(&a, &b, &c, operationId); \
} \
return c; \ return c; \
} }
...@@ -207,9 +196,6 @@ void funcName(const XTensor &a, const XTensor &b, XTensor c) ...@@ -207,9 +196,6 @@ void funcName(const XTensor &a, const XTensor &b, XTensor c)
InitTensor(&c, &a); \ InitTensor(&c, &a); \
} \ } \
_funcName(&a, &b, &c); \ _funcName(&a, &b, &c); \
if (a.enableGrad && b.enableGrad) { \
XLink::MakeLink(&a, &b, &c, operationId); \
} \
} }
#ifdef USE_CUDA #ifdef USE_CUDA
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论