Commit 7d201bf2 by xiaotong

remove useless code and fix bugs

parent aac1ad5c
......@@ -175,8 +175,6 @@ void SumDimTest()
    XTensor y;
    XTensor z;
    int loops = 2000;
    int a = 5;
    int b = 7;
    int c = 3;
......
......@@ -118,11 +118,16 @@ void XMathGrad::GradSumDim(XTensor * node)
        if(b->outgo.tailNum > 1){
            XTensor * bGradTMP = NewTensorBuf(b->grad, b->devID, b->mem);
            _ReduceSum(node->grad, bGradTMP, 0);
+           if(beta != 1.0F)
+               _ScaleAndShiftMe(bGradTMP, beta);
            _Sum(bGradTMP, b->grad, b->grad);
            DelTensorBuf(bGradTMP);
        }
-       else
+       else{
            _ReduceSum(node->grad, b->grad, 0);
+           if(beta != 1.0F)
+               _ScaleAndShiftMe(b->grad, beta);
+       }

        node->grad->Reshape(order, dimSize);
    }
......@@ -150,11 +155,16 @@ void XMathGrad::GradSumDim(XTensor * node)
        if(b->outgo.tailNum > 1){
            XTensor * bGradTMP = NewTensorBuf(b->grad, b->devID, b->mem);
            _ReduceSum(interGrad, bGradTMP, 0);
+           if(beta != 1.0F)
+               _ScaleAndShiftMe(bGradTMP, beta);
            _Sum(bGradTMP, b->grad, b->grad);
            DelTensorBuf(bGradTMP);
        }
-       else
+       else{
            _ReduceSum(interGrad, b->grad, 0);
+           if(beta != 1.0F)
+               _ScaleAndShiftMe(b->grad, beta);
+       }

        node->grad->Reshape(order, dimSize);
......
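Both GradSumDim hunks above make the same fix, once for the gradient taken directly from node->grad and once for the interGrad path: the scaling factor beta of the forward operation was dropped when the gradient was reduced onto b. Assuming the usual SumDim semantics c = a + beta * b, with b broadcast along one dimension of a, the chain rule gives (a sketch of the intended math, not part of the source):

    % each broadcast copy of b_j contributes beta to the corresponding c_{ij}
    \frac{\partial L}{\partial b_j}
        = \sum_i \frac{\partial L}{\partial c_{ij}} \cdot \frac{\partial c_{ij}}{\partial b_j}
        = \beta \sum_i \frac{\partial L}{\partial c_{ij}}

which is what the added _ScaleAndShiftMe(..., beta) calls compute right after _ReduceSum; the beta != 1.0F guard simply skips the extra pass in the common beta = 1 case.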
......@@ -975,7 +975,6 @@ forward process (with tensor connections)
*/
void ForwardAutoDiff(XTensor inputs[], XTensor &output, FNNModel &model)
{
-   int batchSize = inputs[0].GetDim(0);
    int n = model.n;
    int depth = model.hDepth;
......
......@@ -19,6 +19,7 @@
 * $Created by: XIAO Tong (xiaotong@mail.neu.edu.cn) 2018-07-31
 */

+#include <math.h>
#include "T2TAttention.h"
#include "T2TUtility.h"
#include "../../tensor/core/CHeader.h"
......
......@@ -19,6 +19,7 @@
 * $Created by: XIAO Tong (xiaotong@mail.neu.edu.cn) 2018-08-01
 */

+#include <math.h>
#include "T2TEmbedding.h"
#include "T2TUtility.h"
#include "../../tensor/core/CHeader.h"
......@@ -51,7 +52,6 @@ void T2TEmbedder::InitModel(int argc, const char ** argv, int myDevID, XMem * my
    devID = myDevID;
    mem = myMem;
    float minmax = 0;
    int maxLength = 0;
    int d = 0;
......
......@@ -88,20 +88,20 @@ XTensor * AttEncoder::Make(XTensor * input)
    for(int i = 0; i < nlayer; i++){
        XTensor * att;
-       XTensor * res;
        XTensor * ln;
        XTensor * fnn;
+       XTensor res;

        /* self attention */
        att = attentions[i].Make(x, x, x);

        /* residual connection */
-       res = &Sum(*att, *x);
+       res = Sum(*att, *x);

        /* TODO: dropout */

        /* layer normalization */
-       ln = layerNorms[i].Make(res);
+       ln = layerNorms[i].Make(&res);

        /* input of next layer */
        x = ln;
......@@ -110,12 +110,12 @@ XTensor * AttEncoder::Make(XTensor * input)
        fnn = fnns[i].Make(x);

        /* residual connection */
-       res = &Sum(*fnn, *x);
+       res = Sum(*fnn, *x);

        /* TODO: dropout */

        /* layer normalization */
-       ln = layerNorms[i].Make(res);
+       ln = layerNorms[i].Make(&res);

        /* input of next layer */
        x = ln;
......
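In AttEncoder::Make, res changes from an XTensor * initialized with &Sum(...) to a plain XTensor assigned from Sum(...). Presumably the bug is that Sum returns its result by value, so &Sum(*att, *x) takes the address of a temporary that no longer exists by the next statement; storing the result by value and passing &res to layerNorms[i].Make keeps it alive for the whole loop iteration. A minimal standalone sketch of the pattern, using a hypothetical stand-in type rather than the NiuTensor API:

    #include <vector>

    /* stand-in for an operation that returns its result by value, like Sum(...) */
    static std::vector<float> MakeResult()
    {
        return std::vector<float>(8, 1.0F);
    }

    int main()
    {
        /* broken pattern: the old "res = &Sum(*att, *x)" amounts to taking the
           address of a temporary; the compiler rejects &MakeResult(), and any
           workaround would leave res dangling once the statement ends */
        /* std::vector<float> * res = &MakeResult(); */

        /* fixed pattern: hold the returned object by value, as the new
           "XTensor res; res = Sum(*att, *x);" does, and pass &res only while
           res is still in scope */
        std::vector<float> res = MakeResult();
        return ((int)res.size() == 8) ? 0 : 1;
    }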