Commit 7d201bf2 by xiaotong

remove useless code and fix bugs

parent aac1ad5c
@@ -175,8 +175,6 @@ void SumDimTest()
     XTensor y;
     XTensor z;
-    int loops = 2000;
     int a = 5;
     int b = 7;
     int c = 3;
......
@@ -118,11 +118,16 @@ void XMathGrad::GradSumDim(XTensor * node)
     if(b->outgo.tailNum > 1){
         XTensor * bGradTMP = NewTensorBuf(b->grad, b->devID, b->mem);
         _ReduceSum(node->grad, bGradTMP, 0);
+        if(beta != 1.0F)
+            _ScaleAndShiftMe(bGradTMP, beta);
         _Sum(bGradTMP, b->grad, b->grad);
         DelTensorBuf(bGradTMP);
     }
-    else
+    else{
         _ReduceSum(node->grad, b->grad, 0);
+        if(beta != 1.0F)
+            _ScaleAndShiftMe(b->grad, beta);
+    }
     node->grad->Reshape(order, dimSize);
 }
@@ -150,11 +155,16 @@ void XMathGrad::GradSumDim(XTensor * node)
     if(b->outgo.tailNum > 1){
         XTensor * bGradTMP = NewTensorBuf(b->grad, b->devID, b->mem);
         _ReduceSum(interGrad, bGradTMP, 0);
+        if(beta != 1.0F)
+            _ScaleAndShiftMe(bGradTMP, beta);
         _Sum(bGradTMP, b->grad, b->grad);
         DelTensorBuf(bGradTMP);
     }
-    else
+    else{
         _ReduceSum(interGrad, b->grad, 0);
+        if(beta != 1.0F)
+            _ScaleAndShiftMe(b->grad, beta);
+    }
     node->grad->Reshape(order, dimSize);
......
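The two hunks above are the actual bug fix of this commit. Assuming the forward operation computes c = a + b * beta with b broadcast along one dimension of a, the gradient for b must be beta times the reduce-sum of the output gradient over the broadcast dimension; the old code dropped the beta factor whenever beta != 1.0F. A minimal standalone sketch of the corrected math, using toy arrays rather than the NiuTrans.Tensor API:

// Toy illustration of the GradSumDim fix (not NiuTrans.Tensor code).
// Forward: c = a + b * beta, with b broadcast over the row dimension of a.
// Backward: db[j] = beta * sum_i dc[i][j] -- the beta factor is what the
// old code silently dropped.
#include <cstdio>
#include <vector>

int main()
{
    const int rows = 2, cols = 3;
    const float beta = 0.5f;

    // upstream gradient dc, one value per element of c (rows x cols)
    std::vector<float> dc = {1, 2, 3, 4, 5, 6};

    // reduce over the broadcast (row) dimension
    std::vector<float> db(cols, 0.0f);
    for (int i = 0; i < rows; i++)
        for (int j = 0; j < cols; j++)
            db[j] += dc[i * cols + j];

    // the fix: scale the reduced gradient by beta
    for (int j = 0; j < cols; j++)
        db[j] *= beta;

    for (int j = 0; j < cols; j++)
        printf("db[%d] = %g\n", j, db[j]);   // prints 2.5 3.5 4.5
    return 0;
}

The patched code applies the same scaling via _ScaleAndShiftMe in both branches: on the temporary buffer when the gradient is accumulated, and on b->grad directly otherwise.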
@@ -975,7 +975,6 @@ forward process (with tensor connections)
 */
 void ForwardAutoDiff(XTensor inputs[], XTensor &output, FNNModel &model)
 {
-    int batchSize = inputs[0].GetDim(0);
     int n = model.n;
     int depth = model.hDepth;
......
@@ -19,6 +19,7 @@
  * $Created by: XIAO Tong (xiaotong@mail.neu.edu.cn) 2018-07-31
  */
+#include <math.h>
 #include "T2TAttention.h"
 #include "T2TUtility.h"
 #include "../../tensor/core/CHeader.h"
......
@@ -19,6 +19,7 @@
  * $Created by: XIAO Tong (xiaotong@mail.neu.edu.cn) 2018-08-01
  */
+#include <math.h>
 #include "T2TEmbedding.h"
 #include "T2TUtility.h"
 #include "../../tensor/core/CHeader.h"
@@ -51,7 +52,6 @@ void T2TEmbedder::InitModel(int argc, const char ** argv, int myDevID, XMem * myMem)
     devID = myDevID;
     mem = myMem;
-    float minmax = 0;
     int maxLength = 0;
     int d = 0;
......
@@ -88,20 +88,20 @@ XTensor * AttEncoder::Make(XTensor * input)
     for(int i = 0; i < nlayer; i++){
         XTensor * att;
-        XTensor * res;
         XTensor * ln;
         XTensor * fnn;
+        XTensor res;

         /* self attention */
         att = attentions[i].Make(x, x, x);

         /* residual connection */
-        res = &Sum(*att, *x);
+        res = Sum(*att, *x);

         /* TODO: dropout */

         /* layer normalization */
-        ln = layerNorms[i].Make(res);
+        ln = layerNorms[i].Make(&res);

         /* input of next layer */
         x = ln;
@@ -110,12 +110,12 @@ XTensor * AttEncoder::Make(XTensor * input)
         fnn = fnns[i].Make(x);

         /* residual connection */
-        res = &Sum(*fnn, *x);
+        res = Sum(*fnn, *x);

         /* TODO: dropout */

         /* layer normalization */
-        ln = layerNorms[i].Make(res);
+        ln = layerNorms[i].Make(&res);

         /* input of next layer */
         x = ln;
......
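The encoder change fixes a lifetime bug rather than dead code: Sum(...) returns its result by value, so the old res = &Sum(*att, *x) took the address of a temporary that dies at the end of the full expression, leaving layerNorms[i].Make(res) reading a dangling pointer. Storing the result by value keeps it alive for the rest of the loop body. A toy sketch of the pattern, with a hypothetical Tensor type rather than the NiuTrans.Tensor API:

// Toy illustration of the dangling-temporary fix (not NiuTrans.Tensor code).
#include <cstdio>

struct Tensor { float v; };

Tensor Sum(const Tensor &a, const Tensor &b)
{
    return Tensor{a.v + b.v};            // returns a temporary by value
}

int main()
{
    Tensor att{1.0f}, x{2.0f};

    // old pattern: Tensor *res = &Sum(att, x);
    // taking the address of an rvalue is rejected by standard C++ (some
    // compilers accept it as an extension), and the pointer would dangle
    // as soon as the statement ends even where it is accepted.

    // the commit switches to value semantics instead:
    Tensor res = Sum(att, x);            // temporary copied/moved into res
    printf("res.v = %g\n", res.v);       // prints 3
    return 0;
}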