Commit 6803ce02 by liyinqiao Committed by 李垠桥

Bug fixed.

1. Build the backward (gradient) link for the ReduceSumAll function.
2. Fix a memory leak by releasing the temporary buffer tensor in _ReduceSumAll.
parent 84ea960a
...@@ -1398,11 +1398,32 @@ void XMathGrad::GradReduceSum(XTensor * node, bool isEfficient) ...@@ -1398,11 +1398,32 @@ void XMathGrad::GradReduceSum(XTensor * node, bool isEfficient)
/* /*
gradient for reduceSumAll gradient for reduceSumAll
for for
TODO c = reduceSumAll(a)
we have
dE/da = dE/dc * 1
>> node - the node (c) for backward computation
>> isEfficient - indicates whether the computation is in
an efficient manner
*/ */
void XMathGrad::GradReduceSumAll(XTensor * node, bool isEfficient) void XMathGrad::GradReduceSumAll(XTensor * node, bool isEfficient)
{ {
ShowNTErrors("TODO!"); XLink &income = node->income;
CheckNTErrors(income.tailNum == 1, "Wrong input tensor number for Reduce!");
XTensor * a = income.tails[0];
XTensor * b = NewTensorBufV2(a, a->devID, a->mem);
XNoder::MakeGrad(a);
DTYPE value = node->grad->Get0D();
_SetDataFixed(b, (void*)&value);
_Sum(a->grad, b, a->grad);
DelTensorBuf(b);
node->visitMark = NODE_FINISHED;
} }
/* /*
......
...@@ -79,6 +79,8 @@ void _ReduceSumAll(const XTensor * source, DTYPE * value) ...@@ -79,6 +79,8 @@ void _ReduceSumAll(const XTensor * source, DTYPE * value)
/* call _ReduceSum function */ /* call _ReduceSum function */
_ReduceSumAll(source, target); _ReduceSumAll(source, target);
*value = target->Get0D(); *value = target->Get0D();
DelTensorBuf(target);
} }
/* /*
......
...@@ -71,7 +71,7 @@ bool TestReduceSumAll1() ...@@ -71,7 +71,7 @@ bool TestReduceSumAll1()
sGPU->SetData(sData, sUnitNum); sGPU->SetData(sData, sUnitNum);
/* call ReduceSumAll function */ /* call ReduceSumAll function */
summation = _ReduceSumAll(sGPU); summation = ReduceSumAllValue(*sGPU);
/* check results */ /* check results */
gpuTest = (fabs(answer - summation) < 1e-4F); gpuTest = (fabs(answer - summation) < 1e-4F);
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论