Commit 4a87ecc0 by xiaotong

code clean

parent 0c0a5e9a
......@@ -192,7 +192,9 @@ IntList SplitInt(const string& s, const string& delimiter)
IntList values;
auto indices = SplitToPos(s, delimiter);
for (int i = 0; i < indices.Size(); i++) {
values.Add(strtol(s.data() + indices[i], nullptr, 10));
/* This line is problematic: why do we need an IntList to keep an int64? */
values.Add((int)strtol(s.data() + indices[i], nullptr, 10));
}
return values;
}
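A note on the cast added above: strtol returns long, so the bare Add(strtol(...)) narrows implicitly on LP64 platforms, and the explicit (int) cast only silences the warning without making out-of-range values safe. A minimal range-checked alternative (ParseIntChecked is a hypothetical helper, not part of the codebase):

    #include <cstdlib>
    #include <climits>
    #include <string>

    // Hypothetical helper: make the long -> int narrowing explicit and safe.
    inline int ParseIntChecked(const std::string& s, size_t pos)
    {
        long v = strtol(s.data() + pos, nullptr, 10);
        if (v > INT_MAX) v = INT_MAX;   // clamp instead of silently truncating
        if (v < INT_MIN) v = INT_MIN;
        return (int)v;
    }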
......@@ -208,4 +210,4 @@ FloatList SplitFloat(const string& s, const string& delimiter)
return values;
}
}
\ No newline at end of file
}
......@@ -226,7 +226,6 @@ XTensor Attention::MakeRPRAttention(XTensor& k, XTensor& q, XTensor& v,
XTensor qheads;
XTensor vheads;
const int batchSize = q.GetDim(0);
const int lenQ = q.GetDim(1);
const int lenKV = k.GetDim(1);
......@@ -402,4 +401,4 @@ void Cache::Reorder(XTensor& reorder)
value = AutoGather(value, reorder);
}
}
}
\ No newline at end of file
}
......@@ -48,8 +48,6 @@ void GLU::InitModel(Config& config)
{
devID = config.devID;
float minmax = 0;
inSize = config.modelSize;
outSize = config.modelSize;
......@@ -84,4 +82,4 @@ XTensor GLU::Make(XTensor& input)
return t1 * Sigmoid(t2);
}
}
\ No newline at end of file
}
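For context, the Make function above implements the gated linear unit: the output is the element-wise product of one linear branch with the sigmoid of another, GLU(x) = t1 ⊙ σ(t2). A minimal scalar sketch of that gating (plain C++, independent of XTensor; t1 * sigmoid(t2) simplifies to t1 / (1 + exp(-t2))):

    #include <cmath>
    #include <vector>

    // Element-wise GLU gating over two pre-computed linear branches:
    // out[i] = t1[i] * sigmoid(t2[i]).
    std::vector<float> GluGate(const std::vector<float>& t1,
                               const std::vector<float>& t2)
    {
        std::vector<float> out(t1.size());
        for (size_t i = 0; i < t1.size(); i++)
            out[i] = t1[i] / (1.0f + std::exp(-t2[i]));
        return out;
    }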
......@@ -334,7 +334,7 @@ void Trainer::Validate(const char* fn, const char* ofn, Model* model)
}
int bSize = output.GetDim(0);
int length = output.GetDim(1);
//int length = output.GetDim(1);
labelOnehot = IndexToOnehot(label, vSizeTgt, 0);
lossTensor = CrossEntropy(output, labelOnehot, paddingDec);
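The pair of calls above first expands integer labels to one-hot vectors, then evaluates cross entropy; against a one-hot target the loss per position reduces to -log of the probability at the gold index. A tiny sketch of that identity (a hypothetical standalone function, not the library's CrossEntropy):

    #include <cmath>
    #include <cstddef>

    // Cross entropy with a one-hot target picks out a single term:
    // CE(p, onehot(gold)) = -log(p[gold]).
    float CrossEntropyOneHot(const float* prob, size_t gold)
    {
        return -std::log(prob[gold]);
    }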
......@@ -479,4 +479,4 @@ void Trainer::PrepareModel(Model* model)
adamBeta2T = 1.0F;
}
}
\ No newline at end of file
}
......@@ -322,7 +322,7 @@ void BeamSearch::Generate(StateBundle* prev, StateBundle* beam)
/* keep the most promising candidates in the beam */
TopK(score, scoreTopK, index, -1, beamSize, true);
float lp = LengthPenalizer::GNMT(beam->nstep, alpha);
//float lp = LengthPenalizer::GNMT(beam->nstep, alpha);
CopyValues(index, indexCPU);
CopyValues(index, preID);
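For reference, LengthPenalizer::GNMT presumably follows the length normalization of Wu et al. (2016), lp(Y) = ((5 + |Y|) / 6)^alpha; a sketch of that formula:

    #include <cmath>

    // GNMT length penalty: scores are divided by a factor that grows with
    // length, so alpha > 0 counteracts beam search's bias toward short outputs.
    float GnmtLengthPenalty(int nstep, float alpha)
    {
        return std::pow((5.0f + (float)nstep) / 6.0f, alpha);
    }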
......@@ -493,8 +493,8 @@ void BeamSearch::Collect(StateBundle* beam)
/* check if this is the first end symbol. It is false
if there have been end symbols in previously generated words. */
bool isCompleted = state.isCompleted &&
(state.last == NULL || !state.last->isCompleted);
//bool isCompleted = state.isCompleted &&
// (state.last == NULL || !state.last->isCompleted);
/* we push the hypothesis into the heap when it is completed */
if ((state.isEnd || state.isCompleted)) {
......@@ -557,7 +557,6 @@ void BeamSearch::Dump(IntList* output, XTensor* score)
}
}
int count = 0;
bool isCompleted = true;
/* we track the state from the end to the beginning */
......@@ -873,4 +872,4 @@ void GreedySearch::Search(Model* model, XTensor& input,
delete[] finishedFlags;
}
}
\ No newline at end of file
}
......@@ -155,7 +155,7 @@ void Translator::Translate(const char* ifn, const char* sfn,
batchLoader.outputBuffer.Add(emptyRes);
}
double startDump = GetClockSec();
//double startDump = GetClockSec();
/* reorder the result */
batchLoader.SortOutput();
......@@ -163,7 +163,7 @@ void Translator::Translate(const char* ifn, const char* sfn,
/* print the result to a file */
batchLoader.DumpRes(ofn);
double elapsed = GetClockSec() - startDump;
//double elapsed = GetClockSec() - startDump;
LOG("translation completed (word=%d, sent=%zu)",
wordCountTotal, batchLoader.inputBuffer.Size() + batchLoader.emptyLines.Size());
......@@ -196,4 +196,4 @@ void Translator::Dump(FILE* file, XTensor* output)
}
}
}
\ No newline at end of file
}
......@@ -34,14 +34,14 @@ void Vocab::Load(const string& src)
/* get the vocab size and the start id */
f >> vsz >> sid;
startID = stol(sid);
vocabSize = stol(vsz);
startID = (int)stol(sid);
vocabSize = (int)stol(vsz);
string word, id;
for (int i = 0; i < vocabSize - startID; i++) {
f >> word >> id;
word2id[word] = stol(id);
id2word[stol(id)] = word;
word2id[word] = (int)stol(id);
id2word[(int)stol(id)] = word;
}
f.close();
......@@ -75,4 +75,4 @@ void Vocab::CopyFrom(const Vocab& v)
id2word.insert(i2w);
}
}
\ No newline at end of file
}
......@@ -182,10 +182,11 @@ void XDevice::Reset()
XMem * mem = GMems.GetMem(devID);
mem->Free();
#ifdef USE_CUDA
int devIDReset = devID;
Clear();
#ifdef USE_CUDA
if (devIDReset >= 0) {
int devIDBackup = -1;
cudaGetDevice(&devIDBackup);
......@@ -195,6 +196,8 @@ void XDevice::Reset()
cudaSetDevice(devIDBackup);
}
#else
Clear();
#endif
}
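The CUDA branch above uses the usual save/switch/restore idiom so that resetting one device does not disturb the caller's current device. A self-contained sketch of that pattern (cudaDeviceReset stands in for the body elided by the hunk):

    #include <cuda_runtime.h>

    // Save the caller's device, act on the target device, then restore.
    void ResetOnDevice(int devID)
    {
        int devIDBackup = -1;
        cudaGetDevice(&devIDBackup);  // remember the current device
        cudaSetDevice(devID);         // switch to the device being reset
        cudaDeviceReset();            // assumed reset work (elided in the hunk)
        cudaSetDevice(devIDBackup);   // restore the caller's device
    }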
......
......@@ -121,8 +121,8 @@ XTensor MulAndShift(const XTensor& x, MATRIX_TRANS_TYPE transposedX,
CheckNTErrors(x.order >= 2 && w.order >= 2, "Input tensors must have an order >= 2!");
int xn = transposedX == X_TRANS ? x.dimSize[x.order - 1] : x.dimSize[x.order - 2];
int xm = transposedX == X_TRANS ? x.dimSize[x.order - 2] : x.dimSize[x.order - 1];
int wn = transposedW == X_TRANS ? w.dimSize[w.order - 1] : w.dimSize[w.order - 2];
//int xm = transposedX == X_TRANS ? x.dimSize[x.order - 2] : x.dimSize[x.order - 1];
//int wn = transposedW == X_TRANS ? w.dimSize[w.order - 1] : w.dimSize[w.order - 2];
int wm = transposedW == X_TRANS ? w.dimSize[w.order - 2] : w.dimSize[w.order - 1];
int order = x.order + w.order - 2;
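Why xm and wn can be commented out: for C = op(X) * op(W), op(X) is xn by xm and op(W) is wn by wm, the inner dimensions xm and wn must match and then cancel, so only xn and wm survive into the output shape. A sketch of the shape rule:

    #include <cassert>

    // Output shape of C = op(X) * op(W) is (xn x wm); xm and wn only
    // participate in the compatibility check.
    void MatMulOutShape(int xn, int xm, int wn, int wm, int& cn, int& cm)
    {
        assert(xm == wn);  // inner dimensions must agree
        cn = xn;
        cm = wm;
    }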
......@@ -179,4 +179,4 @@ XTensor MulAndShift(const XTensor& x, MATRIX_TRANS_TYPE transposedX,
return c;
}
}
\ No newline at end of file
}
......@@ -231,8 +231,8 @@ And this is a special spread function for backward computation of gather functio
*/
void _SpreadForGather(XTensor * source, XTensor * collection, XTensor * index)
{
int dim = 0;
int order = source->order;
//int dim = 0;
//int order = source->order;
CheckNTErrors(source->dataType == DEFAULT_DTYPE, "TODO!");
CheckNTErrors(collection->GetDim(-1) == source->GetDim(-1), "Illegal dimension!");
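As the doc comment says, this is the backward of gather: each collected row's gradient is added back into the source row it was gathered from, and since indices may repeat the update must accumulate. A naive row-wise sketch over flat buffers (names are illustrative):

    #include <cstddef>

    // Backward of gather: source[index[i]] += collection[i], row by row.
    void SpreadForGatherRows(float* source, const float* collection,
                             const int* index, size_t indexSize, size_t rowSize)
    {
        for (size_t i = 0; i < indexSize; i++)
            for (size_t j = 0; j < rowSize; j++)
                source[(size_t)index[i] * rowSize + j] +=
                    collection[i * rowSize + j];
    }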
......@@ -272,4 +272,4 @@ void _SpreadForGather(XTensor * source, XTensor * collection, XTensor * index)
}
}
} // namespace nts(NiuTrans.Tensor)
\ No newline at end of file
} // namespace nts(NiuTrans.Tensor)
......@@ -72,7 +72,6 @@ sum all the items of the tensor (It should be optimized!)
void _ReduceSumAll(const XTensor * source, DTYPE * value)
{
int * dimSize = new int[MAX_TENSOR_DIM_NUM];
float dr = (!source->isSparse) ? 1.0F : source->denseRatio;
XTensor * target = NewTensorBufV2(0, dimSize, source->dataType, source->denseRatio, source->devID, source->mem);
target->SetTMPFlag();
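Semantically, _ReduceSumAll collapses the whole tensor to a single scalar (the doc comment itself concedes it should be optimized). The reference behavior is just a flat accumulation:

    #include <cstddef>

    // Reference semantics of _ReduceSumAll: sum every element.
    float ReduceSumAllNaive(const float* data, size_t unitNum)
    {
        float sum = 0.0f;
        for (size_t i = 0; i < unitNum; i++)
            sum += data[i];
        return sum;
    }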
......@@ -122,4 +121,4 @@ DTYPE ReduceSumAllValue(const XTensor & source)
return target.Get0D();
}
} // namespace nts(NiuTrans.Tensor)
\ No newline at end of file
} // namespace nts(NiuTrans.Tensor)
......@@ -43,13 +43,11 @@ void _Stack(const TensorList * smalls, XTensor * t, int dim)
int blockSize = 1;
int blockNum = 1;
int gridSize = 1;
int gridNum = 1;
XTensor * smallsItem0 = smalls->GetItem(0);
int unitNum = smallsItem0->unitNum;
//int unitNum = smallsItem0->unitNum;
int unitSize = smallsItem0->unitSize;
int itemSize = unitNum * unitSize;
for (int i = 0; i < smallsItem0->order; i++) {
if (i >= dim)
......@@ -129,7 +127,7 @@ bool CheckStackShape(const TensorList &smalls, XTensor &t, int dim)
XTensor * tensor = (XTensor*)smalls.GetItem(0);
int order = tensor->order;
for (int i = 0; i < tensor->order; i++) {
for (int i = 0; i < order; i++) {
if (i < dim) {
if (t.GetDim(i) != tensor->GetDim(i))
return false;
......
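On the _Stack hunk above: the block/grid bookkeeping presumably splits each input tensor into gridNum slices of blockSize elements, and the stacked output interleaves one block from every input per slice. A naive sketch of that copy layout (float buffers; the layout is an assumption from the variable names):

    #include <cstring>
    #include <vector>

    // Stack n equally-shaped inputs: for each grid slice g, copy one
    // block of blockSize floats from every input in turn.
    void StackBlocks(const std::vector<const float*>& smalls, float* t,
                     int gridNum, int blockSize)
    {
        int n = (int)smalls.size();
        for (int g = 0; g < gridNum; g++)
            for (int s = 0; s < n; s++)
                std::memcpy(t + ((size_t)g * n + s) * blockSize,
                            smalls[s] + (size_t)g * blockSize,
                            sizeof(float) * (size_t)blockSize);
    }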
......@@ -215,12 +215,12 @@ bool TestConvertDataType3()
{0.5F, -4.0F},
{0.0F, 6.0F} };
DTYPE data2[2][3] = { {1.0F, 2.0F, 3.0F},
/*DTYPE data2[2][3] = { {1.0F, 2.0F, 3.0F},
{0.0F, 4.0F, 5.0F} };
DTYPE answer[3][3] = { {1.0F, -6.0F, -7.0F},
{0.5F, -15.0F, -18.5F},
{0.0F, 24.0F, 30.0F} };
{0.0F, 24.0F, 30.0F} };*/
/* CPU test */
bool cpuTest = true;
......
......@@ -67,7 +67,6 @@ bool TestGather1()
DTYPE answer[2][3] = { {0.0F, -1.0F, 2.0F},
{1.0F, 2.0F, 4.0F} };
int dim = 0;
int indexSize = 2;
int srcIndex[2] = {0, 2};
......
......@@ -422,7 +422,7 @@ bool TestSetData6()
for (int i = 0; i < order; i++)
unitNum *= dimSize[i];
DTYPE answer[5] = {5.2F, 3.2F, 1.2F, -0.8F, -2.8F};
//DTYPE answer[5] = {5.2F, 3.2F, 1.2F, -0.8F, -2.8F};
/* CPU test */
bool cpuTest = true;
......
......@@ -51,7 +51,7 @@ bool XWorkerJob::AddJobRefresh(XModel * paramKeeper)
XList args(1);
args.Add(paramKeeper);
queue.EnqueueJob(&XModel::Refresh, &args);
queue.EnqueueJob((void*)&paramKeeper->Refresh, &args);
return true;
}
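A caveat on the new EnqueueJob call: casting the address of a bound member function, (void*)&paramKeeper->Refresh, is not portable C++. A standard-conforming alternative is a static trampoline with a plain function-pointer shape that recovers the object from the argument list (a sketch; the XList accessor is an assumption):

    // Hypothetical trampoline: free function, so taking its address is legal.
    static void RefreshJob(XList* args)
    {
        XModel* model = (XModel*)args->GetItem(0);  // assumed accessor
        model->Refresh();
    }
    // enqueued as: queue.EnqueueJob((void*)RefreshJob, &args);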
......@@ -79,4 +79,4 @@ bool XWorkerJob::AddJobNeuralNet(void * func, void * net, XList * inputs, XList
return true;
}
} /* end of the nts (NiuTrans.Tensor) namespace */
\ No newline at end of file
} /* end of the nts (NiuTrans.Tensor) namespace */