Commit 8cb65ef5 by xiaotong

clean the code

parent d2011ab6
......@@ -843,7 +843,6 @@ void XMathGrad::GradMultiplyBroadcast(XTensor * node, bool isEfficient)
XTensor * a = income.tails[0];
XTensor * b = income.tails[1];
DTYPE beta = income.GetParam(0);
XNoder::MakeGrad(a);
_MultiplyBroadcast(node->grad, b, a->grad, 1.0F);
......@@ -1311,7 +1310,7 @@ void XMathGrad::GradSumBroadcast(XTensor * node, bool isEfficient)
XTensor * a = income.tails[0];
XTensor * b = income.tails[1];
DTYPE beta = income.GetParam(0);
//DTYPE beta = income.GetParam(0);
XNoder::MakeGrad(a);
_Sum(a->grad, node->grad, a->grad);
......
......@@ -150,7 +150,6 @@ void T2TTrainer::Train(const char * fn, const char * validFN, const char * model
int wc = 0;
int ws =0;
int wordCount = 0;
int totalW;
int wordCountTotal = 0;
int wordCountBatch = 0;
bool isEnd = false;
......@@ -590,7 +589,6 @@ int T2TTrainer::LoadBuf(FILE * file, bool isSorted, int step)
/* distribute samples into buckets. In each bucket, sequences have
a similar length */
if (bucketSize > 0) {
int bucketCount = 0;
int low = 0;
int high = low + bucketSize;
int n = count - 1;
......@@ -947,7 +945,6 @@ int T2TTrainer::LoadBatchMT(FILE * file,
int sCount = sc/2;
int seqSize = 0;
int dimsDec[3] = {sCount, maxDec, vsDec};
InitTensor2D(batchEnc, sCount, maxEnc, X_INT, devID, mem);
InitTensor2D(paddingEnc, sCount, maxEnc, X_FLOAT, devID, mem);
......@@ -966,7 +963,6 @@ int T2TTrainer::LoadBatchMT(FILE * file,
int wCountEnc = 0;
int wCountDec = 0;
int wCountPad = 0;
int wGold = 0;
wCount = 0;
int * batchEncValues = new int[batchEnc->unitNum];
......
......@@ -25,6 +25,7 @@
#include "T2TModel.h"
#include "T2TUtility.h"
#include "T2TTrainer.h"
#include "T2TSearcher.h"
#include "../../tensor/XDevice.h"
#include "../../tensor/XUtility.h"
#include "../../tensor/XGlobal.h"
......
......@@ -111,10 +111,9 @@ void _IndexToOnehot(XTensor * index, XTensor * onehot, int size, float labelSmoo
onehot->SetZeroAll();
#ifdef USE_CUDA
float confidence = 1 - labelSmoothingP;
float lowconfidence = labelSmoothingP / size;
#ifdef USE_CUDA
if(onehot->devID >= 0 && index->devID >= 0) {
_CudaIndexToOnehot(index, onehot, size, confidence, lowconfidence);
return;
......@@ -165,4 +164,4 @@ XTensor IndexToOnehot(XTensor & index, int size, float labelSmoothingP)
return onehot;
}
} // namespace nts(NiuTrans.Tensor)
\ No newline at end of file
} // namespace nts(NiuTrans.Tensor)
......@@ -62,7 +62,7 @@ DTYPE _ReduceSumAll(const XTensor * source)
return r;
int order = source->order;
/*int order = source->order;
DTYPE summation;
XTensor * big = NewTensor(source);
......@@ -90,7 +90,7 @@ DTYPE _ReduceSumAll(const XTensor * source)
summation = big->Get1D(0);
delete big;
return summation;
return summation;*/
}
/*
......@@ -103,4 +103,4 @@ DTYPE ReduceSumAll(const XTensor & source)
return _ReduceSumAll(&source);
}
} // namespace nts(NiuTrans.Tensor)
\ No newline at end of file
} // namespace nts(NiuTrans.Tensor)
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论