Skip to content
项目
群组
代码片段
帮助
当前项目
正在载入...
登录 / 注册
切换导航面板
N
NiuTrans.Tensor
概览
Overview
Details
Activity
Cycle Analytics
版本库
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
问题
0
Issues
0
列表
Board
标记
里程碑
合并请求
0
Merge Requests
0
CI / CD
CI / CD
流水线
作业
日程表
图表
维基
Wiki
代码片段
Snippets
成员
Collapse sidebar
Close sidebar
活动
图像
聊天
创建新问题
作业
提交
Issue Boards
Open sidebar
杨迪
NiuTrans.Tensor
Commits
04f129fc
Commit
04f129fc
authored
Jul 17, 2019
by
huchi
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
clean all comments about requireLink
parent
c6f2dbdf
隐藏空白字符变更
内嵌
并排
正在显示
64 个修改的文件
包含
95 行增加
和
125 行删除
+95
-125
source/tensor/core/arithmetic/Div.cpp
+1
-2
source/tensor/core/arithmetic/Div.h
+1
-1
source/tensor/core/arithmetic/DivDim.cpp
+1
-2
source/tensor/core/arithmetic/DivDim.h
+1
-1
source/tensor/core/arithmetic/MatrixMul.cpp
+2
-2
source/tensor/core/arithmetic/MatrixMul.h
+2
-2
source/tensor/core/arithmetic/Multiply.cpp
+1
-2
source/tensor/core/arithmetic/Multiply.h
+1
-1
source/tensor/core/arithmetic/MultiplyDim.cpp
+2
-4
source/tensor/core/arithmetic/MultiplyDim.h
+2
-2
source/tensor/core/arithmetic/Negate.cpp
+1
-2
source/tensor/core/arithmetic/Negate.h
+1
-1
source/tensor/core/arithmetic/Sign.cpp
+1
-2
source/tensor/core/arithmetic/Sign.h
+1
-1
source/tensor/core/arithmetic/Sub.cpp
+1
-2
source/tensor/core/arithmetic/Sub.h
+1
-1
source/tensor/core/arithmetic/SubDim.cpp
+1
-2
source/tensor/core/arithmetic/SubDim.h
+1
-1
source/tensor/core/arithmetic/Sum.cpp
+1
-2
source/tensor/core/arithmetic/Sum.h
+1
-1
source/tensor/core/arithmetic/SumDim.cpp
+2
-4
source/tensor/core/arithmetic/SumDim.h
+2
-2
source/tensor/core/math/Binary.cpp
+1
-1
source/tensor/core/math/Binary.h
+3
-3
source/tensor/core/math/Clip.cpp
+1
-1
source/tensor/core/math/Clip.h
+1
-1
source/tensor/core/math/Power.cpp
+1
-2
source/tensor/core/math/Power.h
+1
-1
source/tensor/core/math/ScaleAndShift.cpp
+1
-2
source/tensor/core/math/ScaleAndShift.h
+1
-1
source/tensor/core/math/Unary.cpp
+2
-2
source/tensor/core/math/Unary.h
+13
-13
source/tensor/core/reduce/ReduceMax.cpp
+1
-2
source/tensor/core/reduce/ReduceMax.h
+1
-1
source/tensor/core/reduce/ReduceMean.cpp
+1
-2
source/tensor/core/reduce/ReduceMean.h
+1
-1
source/tensor/core/reduce/ReduceSum.cpp
+2
-3
source/tensor/core/reduce/ReduceSum.h
+2
-2
source/tensor/core/reduce/ReduceSumSquared.cpp
+1
-2
source/tensor/core/reduce/ReduceSumSquared.h
+1
-1
source/tensor/core/reduce/ReduceVariance.cpp
+1
-2
source/tensor/core/reduce/ReduceVariance.h
+1
-1
source/tensor/core/shape/Merge.cpp
+1
-1
source/tensor/core/shape/Merge.h
+1
-1
source/tensor/core/shape/Reshape.cpp
+1
-1
source/tensor/core/shape/Reshape.h
+1
-1
source/tensor/core/shape/Split.cpp
+1
-1
source/tensor/core/shape/Split.h
+1
-1
source/tensor/core/shape/Squeeze.cpp
+1
-1
source/tensor/core/shape/Squeeze.h
+1
-1
source/tensor/core/shape/Unsqueeze.cpp
+1
-1
source/tensor/core/shape/Unsqueeze.h
+1
-1
source/tensor/function/HardTanH.cpp
+1
-1
source/tensor/function/HardTanH.h
+1
-1
source/tensor/function/Identity.cpp
+1
-1
source/tensor/function/Identity.h
+1
-1
source/tensor/function/LogSoftmax.cpp
+9
-20
source/tensor/function/LogSoftmax.h
+1
-1
source/tensor/function/Rectify.cpp
+1
-1
source/tensor/function/Rectify.h
+1
-1
source/tensor/function/Sigmoid.cpp
+1
-1
source/tensor/function/Sigmoid.h
+1
-1
source/tensor/function/Softmax.cpp
+1
-1
source/tensor/function/Softmax.h
+1
-1
没有找到文件。
source/tensor/core/arithmetic/Div.cpp
查看文件 @
04f129fc
...
...
@@ -229,9 +229,8 @@ where i is the index of the item
>> c - result tensor
>> alpha - the coefficient
>> leadingDim - the dimension along which we perform broadcasting
>> requireLink - if add operation to network
*/
void
Div
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
DTYPE
alpha
,
int
leadingDim
,
bool
requireLink
)
void
Div
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
DTYPE
alpha
,
int
leadingDim
)
{
if
(
!
c
.
isInit
||
!
XTensor
::
IsSameShaped
(
&
a
,
&
c
))
{
InitTensor
(
&
c
,
&
a
);
...
...
source/tensor/core/arithmetic/Div.h
查看文件 @
04f129fc
...
...
@@ -54,7 +54,7 @@ element-wise division of two tensors:
c(i) = a(i)/b(i) + \alpha * c(i)
where i is the index of the element
*/
void
Div
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
DTYPE
alpha
=
0
.
0
,
int
leadingDim
=
0
,
bool
requireLink
=
false
);
void
Div
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
DTYPE
alpha
=
0
.
0
,
int
leadingDim
=
0
);
}
// namespace nts(NiuTrans.Tensor)
...
...
source/tensor/core/arithmetic/DivDim.cpp
查看文件 @
04f129fc
...
...
@@ -183,9 +183,8 @@ i.e., a is divided with b by broadcasting
>> c - where we put result. we save it in a if c is NULL
>> n - the dimension index
>> alpha - the scaling factor
>> requireLink - if add operation to network
*/
void
DivDim
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
int
n
,
DTYPE
alpha
,
bool
requireLink
)
void
DivDim
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
int
n
,
DTYPE
alpha
)
{
if
(
!
c
.
isInit
||
!
XTensor
::
IsSameShaped
(
&
a
,
&
c
))
{
InitTensor
(
&
c
,
&
a
);
...
...
source/tensor/core/arithmetic/DivDim.h
查看文件 @
04f129fc
...
...
@@ -59,7 +59,7 @@ c(i) = a/b + \alpha * c
where the size of b is equal to the n-th dimension of a,
i.e., a is divided with b by broadcasting
*/
void
DivDim
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
int
n
,
DTYPE
alpha
=
(
DTYPE
)
0
.
0
,
bool
requireLink
=
false
);
void
DivDim
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
int
n
,
DTYPE
alpha
=
(
DTYPE
)
0
.
0
);
}
// namespace nts(NiuTrans.Tensor)
...
...
source/tensor/core/arithmetic/MatrixMul.cpp
查看文件 @
04f129fc
...
...
@@ -304,7 +304,7 @@ XTensor MatrixMul(const XTensor &a, MATRIX_TRANS_TYPE transposedA,
void
MatrixMul
(
const
XTensor
&
a
,
MATRIX_TRANS_TYPE
transposedA
,
const
XTensor
&
b
,
MATRIX_TRANS_TYPE
transposedB
,
XTensor
&
c
,
DTYPE
alpha
,
XPRunner
*
parallelRunner
,
bool
requireLink
)
DTYPE
alpha
,
XPRunner
*
parallelRunner
)
{
CheckNTErrors
(
a
.
dataType
==
b
.
dataType
,
"Input tensors should have the same data type!"
);
CheckNTErrors
(
a
.
order
>=
2
&&
b
.
order
>=
2
,
"Input tensors must have a order >= 2!"
);
...
...
@@ -400,7 +400,7 @@ XTensor MatrixMul(const XTensor &a, const XTensor &b,
}
void
MatrixMul
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
DTYPE
alpha
,
XPRunner
*
parallelRunner
,
bool
requireLink
)
DTYPE
alpha
,
XPRunner
*
parallelRunner
)
{
CheckNTErrors
(
a
.
dataType
==
b
.
dataType
,
"Input tensors should have the same data type!"
);
CheckNTErrors
(
a
.
order
>=
2
&&
b
.
order
>=
2
,
"Input tensors must have a order >= 2!"
);
...
...
source/tensor/core/arithmetic/MatrixMul.h
查看文件 @
04f129fc
...
...
@@ -60,14 +60,14 @@ XTensor MatrixMul(const XTensor &a, MATRIX_TRANS_TYPE transposedA, const XTensor
DTYPE
alpha
=
(
DTYPE
)
1
.
0
,
XPRunner
*
parallelRunner
=
NULL
);
void
MatrixMul
(
const
XTensor
&
a
,
MATRIX_TRANS_TYPE
transposedA
,
const
XTensor
&
b
,
MATRIX_TRANS_TYPE
transposedB
,
XTensor
&
c
,
DTYPE
alpha
=
(
DTYPE
)
1
.
0
,
XPRunner
*
parallelRunner
=
NULL
,
bool
requireLink
=
false
);
XTensor
&
c
,
DTYPE
alpha
=
(
DTYPE
)
1
.
0
,
XPRunner
*
parallelRunner
=
NULL
);
/* matrix multiplication with no transposition c = a * b * alpha*/
XTensor
MatrixMul
(
const
XTensor
&
a
,
const
XTensor
&
b
,
DTYPE
alpha
=
(
DTYPE
)
1
.
0
,
XPRunner
*
parallelRunner
=
NULL
);
void
MatrixMul
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
DTYPE
alpha
=
(
DTYPE
)
1
.
0
,
XPRunner
*
parallelRunner
=
NULL
,
bool
requireLink
=
false
);
DTYPE
alpha
=
(
DTYPE
)
1
.
0
,
XPRunner
*
parallelRunner
=
NULL
);
}
// namespace nts(NiuTrans.Tensor)
...
...
source/tensor/core/arithmetic/Multiply.cpp
查看文件 @
04f129fc
...
...
@@ -230,9 +230,8 @@ where i is the index of the item
>> c - result tensor
>> alpha - the coefficient
>> leadingDim - the dimension along which we perform broadcasting
>> requireLink - if add operation to network
*/
void
Multiply
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
DTYPE
alpha
,
int
leadingDim
,
bool
requireLink
)
void
Multiply
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
DTYPE
alpha
,
int
leadingDim
)
{
if
(
!
c
.
isInit
||
!
XTensor
::
IsSameShaped
(
&
a
,
&
c
))
{
InitTensor
(
&
c
,
&
a
);
...
...
source/tensor/core/arithmetic/Multiply.h
查看文件 @
04f129fc
...
...
@@ -54,7 +54,7 @@ element-wise product of two tensors:
c(i) = a(i)*b(i) + \alpha * c(i)
where i is the index of the element
*/
void
Multiply
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
DTYPE
alpha
=
0
.
0
,
int
leadingDim
=
0
,
bool
requireLink
=
false
);
void
Multiply
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
DTYPE
alpha
=
0
.
0
,
int
leadingDim
=
0
);
}
// namespace nts(NiuTrans.Tensor)
...
...
source/tensor/core/arithmetic/MultiplyDim.cpp
查看文件 @
04f129fc
...
...
@@ -180,9 +180,8 @@ i.e., a is multiplied with b by broadcasting
>> b - another tensor whose size is equal to that of dimension n of a
>> c - where we put a * b + \alpha * c. we save it in a if c is NULL
>> n - the dimension index
>> requireLink - if add operation to network
*/
void
MultiplyDim
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
int
n
,
bool
requireLink
)
void
MultiplyDim
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
int
n
)
{
if
(
!
c
.
isInit
||
!
XTensor
::
IsSameShaped
(
&
a
,
&
c
))
{
InitTensor
(
&
c
,
&
a
);
...
...
@@ -347,9 +346,8 @@ where some of dimensions of b can be of size 1
>> a - a tensor
>> b - another tensor that would be broadcasted
>> c - the resulting tensor
>> requireLink - if add operation to network
*/
void
MultiplyBroadcast
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
bool
requireLink
)
void
MultiplyBroadcast
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
)
{
if
(
!
c
.
isInit
||
!
XTensor
::
IsSameShaped
(
&
a
,
&
c
))
{
InitTensor
(
&
c
,
&
a
);
...
...
source/tensor/core/arithmetic/MultiplyDim.h
查看文件 @
04f129fc
...
...
@@ -40,7 +40,7 @@ XTensor MultiplyDim(const XTensor &a, const XTensor &b, int n);
/* tensor multiplication c = a * b + \alpha * c where the size of b is equal to the n-th dimension of a,
i.e., a is multiplied with b by broadcasting */
void
MultiplyDim
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
int
n
,
bool
requireLink
=
false
);
void
MultiplyDim
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
int
n
);
/* tensor multiplication summation c = a * b + c * \beta where some of dimensions of b can be of size 1 */
void
_MultiplyBroadcast
(
const
XTensor
*
a
,
const
XTensor
*
b
,
XTensor
*
c
,
DTYPE
beta
=
(
DTYPE
)
1
.
0
);
...
...
@@ -50,7 +50,7 @@ void _MultiplyBroadcast(const XTensor * a, const XTensor * b, XTensor * c, DTYPE
XTensor
MultiplyBroadcast
(
const
XTensor
&
a
,
const
XTensor
&
b
);
/* tensor multiplication summation c = a * b + c * \beta where some of dimensions of b can be of size 1 */
void
MultiplyBroadcast
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
bool
requireLink
=
false
);
void
MultiplyBroadcast
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
);
}
// namespace nts(NiuTrans.Tensor)
...
...
source/tensor/core/arithmetic/Negate.cpp
查看文件 @
04f129fc
...
...
@@ -83,9 +83,8 @@ XTensor Negate(const XTensor & a)
set every entry to its minus value
>> a - input tensor we are processing
>> b - output tensor we are processing
>> requireLink - if add operation to network
*/
void
Negate
(
const
XTensor
&
a
,
XTensor
&
b
,
bool
requireLink
)
void
Negate
(
const
XTensor
&
a
,
XTensor
&
b
)
{
if
(
!
b
.
isInit
||
!
XTensor
::
IsSameShaped
(
&
a
,
&
b
))
{
InitTensor
(
&
b
,
&
a
);
...
...
source/tensor/core/arithmetic/Negate.h
查看文件 @
04f129fc
...
...
@@ -42,7 +42,7 @@ make a new tensor to keep the result and return it
XTensor
Negate
(
const
XTensor
&
a
);
/* set every entry to its minus value */
void
Negate
(
const
XTensor
&
a
,
XTensor
&
b
,
bool
requireLink
=
false
);
void
Negate
(
const
XTensor
&
a
,
XTensor
&
b
);
}
// namespace nts(NiuTrans.Tensor)
...
...
source/tensor/core/arithmetic/Sign.cpp
查看文件 @
04f129fc
...
...
@@ -89,9 +89,8 @@ XTensor Sign(const XTensor & a)
set every entry to its sign value
>> a - input tensor we are processing
>> b - output tensor we are processing
>> requireLink - if add operation to network
*/
void
Sign
(
const
XTensor
&
a
,
XTensor
&
b
,
bool
requireLink
)
void
Sign
(
const
XTensor
&
a
,
XTensor
&
b
)
{
if
(
!
b
.
isInit
||
!
XTensor
::
IsSameShaped
(
&
a
,
&
b
))
{
InitTensor
(
&
b
,
&
a
);
...
...
source/tensor/core/arithmetic/Sign.h
查看文件 @
04f129fc
...
...
@@ -42,7 +42,7 @@ make a new tensor to keep the result and return it
XTensor
Sign
(
const
XTensor
&
a
);
/* set every entry to its sign value */
void
Sign
(
const
XTensor
&
a
,
XTensor
&
b
,
bool
requireLink
=
false
);
void
Sign
(
const
XTensor
&
a
,
XTensor
&
b
);
}
// namespace nts(NiuTrans.Tensor)
...
...
source/tensor/core/arithmetic/Sub.cpp
查看文件 @
04f129fc
...
...
@@ -203,9 +203,8 @@ tensor subtraction c = a - b * \beta
>> b - another tensor
>> c - where we put a-b*\beta. we save it in a if c is NULL
>> beta - the scaling factor
>> requireLink - if add operation to network
*/
void
Sub
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
DTYPE
beta
,
bool
requireLink
)
void
Sub
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
DTYPE
beta
)
{
if
(
!
c
.
isInit
||
!
XTensor
::
IsSameShaped
(
&
a
,
&
c
))
{
InitTensor
(
&
c
,
&
a
);
...
...
source/tensor/core/arithmetic/Sub.h
查看文件 @
04f129fc
...
...
@@ -43,7 +43,7 @@ make a new tensor c to keep the result and return it
XTensor
Sub
(
const
XTensor
&
a
,
const
XTensor
&
b
,
DTYPE
beta
=
(
DTYPE
)
1
.
0
);
/* tensor subtraction c = a - b * \beta */
void
Sub
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
DTYPE
beta
=
(
DTYPE
)
1
.
0
,
bool
requireLink
=
false
);
void
Sub
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
DTYPE
beta
=
(
DTYPE
)
1
.
0
);
}
// namespace nts(NiuTrans.Tensor)
...
...
source/tensor/core/arithmetic/SubDim.cpp
查看文件 @
04f129fc
...
...
@@ -183,9 +183,8 @@ i.e., a is subtracted with b by broadcasting
>> c - where we put a-b*\beta. we save it in a if c is NULL
>> n - the dimension index
>> beta - the scaling factor
>> requireLink - if add operation to network
*/
void
SubDim
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
int
n
,
DTYPE
beta
,
bool
requireLink
)
void
SubDim
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
int
n
,
DTYPE
beta
)
{
if
(
!
c
.
isInit
||
!
XTensor
::
IsSameShaped
(
&
a
,
&
c
))
{
InitTensor
(
&
c
,
&
a
);
...
...
source/tensor/core/arithmetic/SubDim.h
查看文件 @
04f129fc
...
...
@@ -40,7 +40,7 @@ XTensor SubDim(const XTensor &a, const XTensor &b, int n, DTYPE beta = (DTYPE)1.
/* tensor subtraction c = a - b * \beta where the size of b is equal to the n-th dimension of a,
i.e., a is subtracted with b by broadcasting*/
void
SubDim
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
int
n
,
DTYPE
beta
=
(
DTYPE
)
1
.
0
,
bool
requireLink
=
false
);
void
SubDim
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
int
n
,
DTYPE
beta
=
(
DTYPE
)
1
.
0
);
}
// namespace nts(NiuTrans.Tensor)
...
...
source/tensor/core/arithmetic/Sum.cpp
查看文件 @
04f129fc
...
...
@@ -207,9 +207,8 @@ tensor summation c = a + b * \beta
>> a - a tensor
>> b - another tensor
>> beta - the scaling factor
>> requireLink - if add operation to network
*/
void
Sum
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
DTYPE
beta
,
bool
requireLink
)
void
Sum
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
DTYPE
beta
)
{
if
(
!
c
.
isInit
||
!
XTensor
::
IsSameShaped
(
&
a
,
&
c
))
{
InitTensor
(
&
c
,
&
a
);
...
...
source/tensor/core/arithmetic/Sum.h
查看文件 @
04f129fc
...
...
@@ -42,7 +42,7 @@ make a new tensor c to keep the result and return it
XTensor
Sum
(
const
XTensor
&
a
,
const
XTensor
&
b
,
DTYPE
beta
=
(
DTYPE
)
1
.
0
);
/* tensor summation c = a + b * \beta */
void
Sum
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
DTYPE
beta
=
(
DTYPE
)
1
.
0
,
bool
requireLink
=
false
);
void
Sum
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
DTYPE
beta
=
(
DTYPE
)
1
.
0
);
}
// namespace nts(NiuTrans.Tensor)
...
...
source/tensor/core/arithmetic/SumDim.cpp
查看文件 @
04f129fc
...
...
@@ -200,9 +200,8 @@ i.e., a is summed with b by broadcasting
>> c - where we put a+b*\beta. we save it in a if c is NULL
>> n - the dimension index
>> beta - the scaling factor
>> requireLink - if add operation to network
*/
void
SumDim
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
int
n
,
DTYPE
beta
,
bool
requireLink
)
void
SumDim
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
int
n
,
DTYPE
beta
)
{
if
(
!
c
.
isInit
||
!
XTensor
::
IsSameShaped
(
&
a
,
&
c
))
{
InitTensor
(
&
c
,
&
a
);
...
...
@@ -368,9 +367,8 @@ c = a + b * \beta
>> b - another tensor that would be broadcasted
>> c - the resulting tensor
>> beta - the scaling factor
>> requireLink - if add operation to network
*/
void
SumBroadcast
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
DTYPE
beta
,
bool
requireLink
)
void
SumBroadcast
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
DTYPE
beta
)
{
if
(
!
c
.
isInit
||
!
XTensor
::
IsSameShaped
(
&
a
,
&
c
))
{
InitTensor
(
&
c
,
&
a
);
...
...
source/tensor/core/arithmetic/SumDim.h
查看文件 @
04f129fc
...
...
@@ -44,7 +44,7 @@ XTensor SumDim(const XTensor &a, const XTensor &b, int n, DTYPE beta = (DTYPE)1.
/* tensor summation c = a + b * \beta where the size of b is equal to the n-th dimension of a,
i.e., a is summed with b by broadcasting */
void
SumDim
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
int
n
,
DTYPE
beta
=
(
DTYPE
)
1
.
0
,
bool
requireLink
=
false
);
void
SumDim
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
int
n
,
DTYPE
beta
=
(
DTYPE
)
1
.
0
);
/* tensor broadcast summation c = a + b * \beta where some of dimensions of b can be of size 1 */
void
_SumBroadcast
(
const
XTensor
*
a
,
const
XTensor
*
b
,
XTensor
*
c
,
DTYPE
beta
=
(
DTYPE
)
1
.
0
);
...
...
@@ -54,7 +54,7 @@ void _SumBroadcast(const XTensor * a, const XTensor * b, XTensor * c, DTYPE beta
XTensor
SumBroadcast
(
const
XTensor
&
a
,
const
XTensor
&
b
,
DTYPE
beta
=
(
DTYPE
)
1
.
0
);
/* tensor broadcast summation c = a + b * \beta where some of dimensions of b can be of size 1 */
void
SumBroadcast
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
DTYPE
beta
=
(
DTYPE
)
1
.
0
,
bool
requireLink
=
false
);
void
SumBroadcast
(
const
XTensor
&
a
,
const
XTensor
&
b
,
XTensor
&
c
,
DTYPE
beta
=
(
DTYPE
)
1
.
0
);
}
// namespace nts(NiuTrans.Tensor)
...
...
source/tensor/core/math/Binary.cpp
查看文件 @
04f129fc
...
...
@@ -126,7 +126,7 @@ XTensor funcName(const XTensor &a, float num) \
} \
#define SIMPLE_BINARY_FUNCTION_VOID(funcName, _funcName, operationId) \
void funcName(const XTensor &a, XTensor &b, float num
, bool requireLink
) \
void funcName(const XTensor &a, XTensor &b, float num) \
{ \
if (!b.isInit || !XTensor::IsSameShaped(&a, &b)) { \
InitTensor(&b, &a); \
...
...
source/tensor/core/math/Binary.h
查看文件 @
04f129fc
...
...
@@ -45,7 +45,7 @@ scale up tensor entires
b = a * scale
*/
void
Scale
(
const
XTensor
&
a
,
XTensor
&
b
,
int
scale
);
void
Scale
(
const
XTensor
&
a
,
XTensor
&
b
,
float
scale
,
bool
requireLink
=
false
);
void
Scale
(
const
XTensor
&
a
,
XTensor
&
b
,
float
scale
);
/*
scale up tensor entires (return an XTensor structure)
...
...
@@ -72,7 +72,7 @@ descale tensor entires
b = a / scale
*/
void
Descale
(
const
XTensor
&
a
,
XTensor
&
b
,
int
scale
);
void
Descale
(
const
XTensor
&
a
,
XTensor
&
b
,
float
scale
,
bool
requireLink
=
false
);
void
Descale
(
const
XTensor
&
a
,
XTensor
&
b
,
float
scale
);
/*
descale tensor entires (return an XTensor structure)
...
...
@@ -99,7 +99,7 @@ shift tensor entires
b = a + shift
*/
void
Shift
(
const
XTensor
&
a
,
XTensor
&
b
,
int
shift
);
void
Shift
(
const
XTensor
&
a
,
XTensor
&
b
,
float
shift
,
bool
requireLink
=
false
);
void
Shift
(
const
XTensor
&
a
,
XTensor
&
b
,
float
shift
);
/*
shift tensor entires (return an XTensor structure)
...
...
source/tensor/core/math/Clip.cpp
查看文件 @
04f129fc
...
...
@@ -94,7 +94,7 @@ XTensor Clip(const XTensor & a, DTYPE lower, DTYPE upper)
return
b
;
}
void
Clip
(
const
XTensor
&
a
,
XTensor
&
b
,
DTYPE
lower
,
DTYPE
upper
,
bool
requireLink
)
void
Clip
(
const
XTensor
&
a
,
XTensor
&
b
,
DTYPE
lower
,
DTYPE
upper
)
{
if
(
!
b
.
isInit
||
!
XTensor
::
IsSameShaped
(
&
a
,
&
b
))
{
InitTensor
(
&
b
,
&
a
);
...
...
source/tensor/core/math/Clip.h
查看文件 @
04f129fc
...
...
@@ -37,7 +37,7 @@ void _ClipMe(XTensor * a, DTYPE lower, DTYPE upper);
make a new tensor to keep the result and return it */
XTensor
Clip
(
const
XTensor
&
a
,
DTYPE
lower
,
DTYPE
upper
);
void
Clip
(
const
XTensor
&
a
,
XTensor
&
b
,
DTYPE
lower
,
DTYPE
upper
,
bool
requireLink
=
false
);
void
Clip
(
const
XTensor
&
a
,
XTensor
&
b
,
DTYPE
lower
,
DTYPE
upper
);
/*
backward of Clip function
...
...
source/tensor/core/math/Power.cpp
查看文件 @
04f129fc
...
...
@@ -107,9 +107,8 @@ get the power(a, p)
>> a - input tensor
>> b - output tensor
>> p - parameter
>> requireLink - if add operation to network
*/
void
Power
(
const
XTensor
&
a
,
XTensor
&
b
,
DTYPE
p
,
bool
requireLink
)
void
Power
(
const
XTensor
&
a
,
XTensor
&
b
,
DTYPE
p
)
{
if
(
!
b
.
isInit
||
!
XTensor
::
IsSameShaped
(
&
a
,
&
b
))
{
InitTensor
(
&
b
,
&
a
);
...
...
source/tensor/core/math/Power.h
查看文件 @
04f129fc
...
...
@@ -42,7 +42,7 @@ make a new tensor to keep the result and return it
XTensor
Power
(
const
XTensor
&
a
,
DTYPE
p
);
/* get the power(x, y) */
void
Power
(
const
XTensor
&
a
,
XTensor
&
b
,
DTYPE
p
,
bool
requireLink
=
false
);
void
Power
(
const
XTensor
&
a
,
XTensor
&
b
,
DTYPE
p
);
}
// namespace nts(NiuTrans.Tensor)
...
...
source/tensor/core/math/ScaleAndShift.cpp
查看文件 @
04f129fc
...
...
@@ -127,9 +127,8 @@ b = a * scale + shift
>> b - the output tensor
>> scale - the scaler factor
>> shift - the shift factor
>> requireLink - if add operation to network
*/
void
ScaleAndShift
(
const
XTensor
&
a
,
XTensor
&
b
,
DTYPE
scale
,
DTYPE
shift
,
bool
requireLink
)
void
ScaleAndShift
(
const
XTensor
&
a
,
XTensor
&
b
,
DTYPE
scale
,
DTYPE
shift
)
{
if
(
!
b
.
isInit
||
!
XTensor
::
IsSameShaped
(
&
a
,
&
b
))
{
InitTensor
(
&
b
,
&
a
);
...
...
source/tensor/core/math/ScaleAndShift.h
查看文件 @
04f129fc
...
...
@@ -54,7 +54,7 @@ XTensor ScaleAndShift(const XTensor &a, DTYPE scale, DTYPE shift = 0);
scale and shift all tensor entires
b = a * scale + shift
*/
void
ScaleAndShift
(
const
XTensor
&
a
,
XTensor
&
b
,
DTYPE
scale
,
DTYPE
shift
=
0
,
bool
requireLink
=
false
);
void
ScaleAndShift
(
const
XTensor
&
a
,
XTensor
&
b
,
DTYPE
scale
,
DTYPE
shift
=
0
);
}
// namespace nts(NiuTrans.Tensor)
...
...
source/tensor/core/math/Unary.cpp
查看文件 @
04f129fc
...
...
@@ -83,7 +83,7 @@ XTensor funcName(const XTensor &a) \
}
#define SIMPLE_UNARY_FUNCTION_VOID(funcName, _funcName, operationId) \
void funcName(const XTensor &a, XTensor &b
, bool requireLink
) \
void funcName(const XTensor &a, XTensor &b) \
{ \
if (!b.isInit || !XTensor::IsSameShaped(&a, &b)) { \
InitTensor(&b, &a); \
...
...
@@ -189,7 +189,7 @@ XTensor funcName(const XTensor &a) \
return b; \
}
#define SIMPLE_UNARY_FUNCTION_VOID(funcName, _funcName, operationId) \
void funcName(const XTensor &a, XTensor &b
, bool requireLink
) \
void funcName(const XTensor &a, XTensor &b) \
{ \
if (!b.isInit || !XTensor::IsSameShaped(&a, &b)) { \
InitTensor(&b, &a); \
...
...
source/tensor/core/math/Unary.h
查看文件 @
04f129fc
...
...
@@ -35,7 +35,7 @@ void _AbsoluteMe(XTensor * a);
make a new tensor to keep the result and return it */
XTensor
Absolute
(
const
XTensor
&
a
);
/* set every entry to its absolute value */
void
Absolute
(
const
XTensor
&
a
,
XTensor
&
b
,
bool
requireLink
=
false
);
void
Absolute
(
const
XTensor
&
a
,
XTensor
&
b
);
/* set every entry to its ceil value */
void
_Ceil
(
const
XTensor
*
a
,
XTensor
*
b
);
...
...
@@ -46,7 +46,7 @@ void _CeilMe(XTensor * a);
make a new tensor to keep the result and return it */
XTensor
Ceil
(
const
XTensor
&
a
);
/* set every entry to its ceil value */
void
Ceil
(
const
XTensor
&
a
,
XTensor
&
b
,
bool
requireLink
=
false
);
void
Ceil
(
const
XTensor
&
a
,
XTensor
&
b
);
/* set every entry to its exponent value */
void
_Exp
(
const
XTensor
*
a
,
XTensor
*
b
);
...
...
@@ -57,7 +57,7 @@ void _ExpMe(XTensor * a);
make a new tensor to keep the result and return it */
XTensor
Exp
(
const
XTensor
&
a
);
/* set every entry to its exponent value */
void
Exp
(
const
XTensor
&
a
,
XTensor
&
b
,
bool
requireLink
=
false
);
void
Exp
(
const
XTensor
&
a
,
XTensor
&
b
);
/* set every entry to its floor value */
void
_Floor
(
const
XTensor
*
a
,
XTensor
*
b
);
...
...
@@ -68,7 +68,7 @@ void _FloorMe(XTensor * a);
make a new tensor to keep the result and return it */
XTensor
Floor
(
const
XTensor
&
a
);
/* set every entry to its floor value */
void
Floor
(
const
XTensor
&
a
,
XTensor
&
b
,
bool
requireLink
=
false
);
void
Floor
(
const
XTensor
&
a
,
XTensor
&
b
);
/* if source entry is non-zero, set target entry to be one, otherwise zero */
void
_IsNonZero
(
const
XTensor
*
a
,
XTensor
*
b
);
...
...
@@ -79,7 +79,7 @@ void _IsNonZeroMe(XTensor *a);
make a new tensor to keep the result and return it */
XTensor
IsNonZero
(
const
XTensor
&
a
);
/* if source entry is non-zero, set target entry to be one, otherwise zero */
void
IsNonZero
(
const
XTensor
&
a
,
XTensor
&
b
,
bool
requireLink
=
false
);
void
IsNonZero
(
const
XTensor
&
a
,
XTensor
&
b
);
/* if source entry is zero, set target entry to be one, otherwise zero */
void
_IsZero
(
const
XTensor
*
a
,
XTensor
*
b
);
...
...
@@ -90,7 +90,7 @@ void _IsZeroMe(XTensor *a);
make a new tensor to keep the result and return it */
XTensor
IsZero
(
const
XTensor
&
a
);
/* if source entry is zero, set target entry to be one, otherwise zero */
void
IsZero
(
const
XTensor
&
a
,
XTensor
&
b
,
bool
requireLink
=
false
);
void
IsZero
(
const
XTensor
&
a
,
XTensor
&
b
);
/* set every entry to its logarithm value */
void
_Log
(
const
XTensor
*
a
,
XTensor
*
b
);
...
...
@@ -101,7 +101,7 @@ void _LogMe(XTensor * a);
make a new tensor to keep the result and return it */
XTensor
Log
(
const
XTensor
&
a
);
/* set every entry to its logarithm value */
void
Log
(
const
XTensor
&
a
,
XTensor
&
b
,
bool
requireLink
=
false
);
void
Log
(
const
XTensor
&
a
,
XTensor
&
b
);
/* set every entry to its round value */
void
_Round
(
const
XTensor
*
a
,
XTensor
*
b
);
...
...
@@ -112,7 +112,7 @@ void _RoundMe(XTensor * a);
make a new tensor to keep the result and return it */
XTensor
Round
(
const
XTensor
&
a
);
/* set every entry to its round value */
void
Round
(
const
XTensor
&
a
,
XTensor
&
b
,
bool
requireLink
=
false
);
void
Round
(
const
XTensor
&
a
,
XTensor
&
b
);
/* set every entry to its sqrt value */
void
_Sqrt
(
const
XTensor
*
a
,
XTensor
*
b
);
...
...
@@ -123,7 +123,7 @@ void _SqrtMe(XTensor * a);
make a new tensor to keep the result and return it */
XTensor
Sqrt
(
const
XTensor
&
a
);
/* set every entry to its sqrt value */
void
Sqrt
(
const
XTensor
&
a
,
XTensor
&
b
,
bool
requireLink
=
false
);
void
Sqrt
(
const
XTensor
&
a
,
XTensor
&
b
);
/* set every entry to its square value */
void
_Square
(
const
XTensor
*
a
,
XTensor
*
b
);
...
...
@@ -134,7 +134,7 @@ void _SquareMe(XTensor * a);
make a new tensor to keep the result and return it */
XTensor
Square
(
const
XTensor
&
a
);
/* set every entry to its square value */
void
Square
(
const
XTensor
&
a
,
XTensor
&
b
,
bool
requireLink
=
false
);
void
Square
(
const
XTensor
&
a
,
XTensor
&
b
);
/* set every entry to its sine value */
...
...
@@ -146,7 +146,7 @@ void _SinMe(XTensor * a);
make a new tensor to keep the result and return it */
XTensor
Sin
(
const
XTensor
&
a
);
/* set every entry to its sine value */
void
Sin
(
const
XTensor
&
a
,
XTensor
&
b
,
bool
requireLink
=
false
);
void
Sin
(
const
XTensor
&
a
,
XTensor
&
b
);
/* set every entry to its cosine value */
void
_Cos
(
const
XTensor
*
a
,
XTensor
*
b
);
...
...
@@ -157,7 +157,7 @@ void _CosMe(XTensor * a);
make a new tensor to keep the result and return it */
XTensor
Cos
(
const
XTensor
&
a
);
/* set every entry to its cosine value */
void
Cos
(
const
XTensor
&
a
,
XTensor
&
b
,
bool
requireLink
=
false
);
void
Cos
(
const
XTensor
&
a
,
XTensor
&
b
);
/* set every entry to its tangent value */
void
_Tan
(
const
XTensor
*
a
,
XTensor
*
b
);
...
...
@@ -168,7 +168,7 @@ void _TanMe(XTensor * a);
make a new tensor to keep the result and return it */
XTensor
Tan
(
const
XTensor
&
a
);
/* set every entry to its tangent value */
void
Tan
(
const
XTensor
&
a
,
XTensor
&
b
,
bool
requireLink
=
false
);
void
Tan
(
const
XTensor
&
a
,
XTensor
&
b
);
}
// namespace nts(NiuTrans.Tensor)
...
...
source/tensor/core/reduce/ReduceMax.cpp
查看文件 @
04f129fc
...
...
@@ -137,9 +137,8 @@ get the max value of the items along a dimension of the tensor
>> input - the input tensor
>> output - the output tensor
>> dim - the dimension where the reduction is performed on
>> requireLink - if add operation to network
*/
void
ReduceMax
(
const
XTensor
&
input
,
XTensor
&
output
,
int
dim
,
bool
requireLink
)
void
ReduceMax
(
const
XTensor
&
input
,
XTensor
&
output
,
int
dim
)
{
CheckNTErrors
(
dim
>=
0
&&
dim
<
input
.
order
,
"Illegal dimension to reduce!"
);
...
...
source/tensor/core/reduce/ReduceMax.h
查看文件 @
04f129fc
...
...
@@ -36,7 +36,7 @@ make a new tensor to keep the result and return it
XTensor
ReduceMax
(
const
XTensor
&
input
,
int
dim
);
/* get the max value of the items along a dimension of the tensor. */
void
ReduceMax
(
const
XTensor
&
input
,
XTensor
&
output
,
int
dim
,
bool
requireLink
=
false
);
void
ReduceMax
(
const
XTensor
&
input
,
XTensor
&
output
,
int
dim
);
}
// namespace nts(NiuTrans.Tensor)
...
...
source/tensor/core/reduce/ReduceMean.cpp
查看文件 @
04f129fc
...
...
@@ -94,9 +94,8 @@ For a 1-dimensional data array a, mean = (1/n) * sum_i input_i
>> input - the input tensor
>> output - the output tensor
>> dim - the dimension where the reduction is performed on
>> requireLink - if add operation to network
*/
void
ReduceMean
(
const
XTensor
&
input
,
XTensor
&
output
,
int
dim
,
bool
requireLink
)
void
ReduceMean
(
const
XTensor
&
input
,
XTensor
&
output
,
int
dim
)
{
CheckNTErrors
(
dim
>=
0
&&
dim
<
input
.
order
,
"Illegal dimension to reduce!"
);
...
...
source/tensor/core/reduce/ReduceMean.h
查看文件 @
04f129fc
...
...
@@ -43,7 +43,7 @@ XTensor ReduceMean(const XTensor &input, int dim);
get the mean value along a dimension of the tensor
For a 1-dimensional data array a, mean = (1/n) * sum_i input_i
*/
void
ReduceMean
(
const
XTensor
&
input
,
XTensor
&
output
,
int
dim
,
bool
requireLink
=
false
);
void
ReduceMean
(
const
XTensor
&
input
,
XTensor
&
output
,
int
dim
);
}
// namespace nts(NiuTrans.Tensor)
...
...
source/tensor/core/reduce/ReduceSum.cpp
查看文件 @
04f129fc
...
...
@@ -244,7 +244,7 @@ XTensor ReduceSum(const XTensor &input, int dim, const XTensor &shift, DTYPE pow
return
output
;
}
void
ReduceSum
(
const
XTensor
&
input
,
XTensor
&
output
,
int
dim
,
const
XTensor
&
shift
,
DTYPE
power
,
bool
isExp
,
bool
requireLink
)
void
ReduceSum
(
const
XTensor
&
input
,
XTensor
&
output
,
int
dim
,
const
XTensor
&
shift
,
DTYPE
power
,
bool
isExp
)
{
CheckNTErrors
(
dim
>=
0
&&
dim
<
input
.
order
,
"Illegal dimension to reduce!"
);
...
...
@@ -336,9 +336,8 @@ sum = \sum_i exp((a_i - shift)^power) if isExp == true
>> shift - shift the input
>> ieExp - specify if the exp() is performed
>> power - we perform pow(item_i, power) on each item in the array
>> requireLink - if add operation to network
*/
void
ReduceSum
(
const
XTensor
&
input
,
XTensor
&
output
,
int
dim
,
DTYPE
power
,
bool
isExp
,
bool
requireLink
)
void
ReduceSum
(
const
XTensor
&
input
,
XTensor
&
output
,
int
dim
,
DTYPE
power
,
bool
isExp
)
{
CheckNTErrors
(
dim
>=
0
&&
dim
<
input
.
order
,
"Illegal dimension to reduce!"
);
...
...
source/tensor/core/reduce/ReduceSum.h
查看文件 @
04f129fc
...
...
@@ -44,7 +44,7 @@ sum = \sum_i exp(a_i - shift) if isExp == true
*/
XTensor
ReduceSum
(
const
XTensor
&
input
,
int
dim
,
const
XTensor
&
shift
,
DTYPE
power
=
(
DTYPE
)
1
.
0
F
,
bool
isExp
=
false
);
void
ReduceSum
(
const
XTensor
&
input
,
XTensor
&
output
,
int
dim
,
const
XTensor
&
shift
,
DTYPE
power
=
(
DTYPE
)
1
.
0
F
,
bool
isExp
=
false
,
bool
requireLink
=
false
);
void
ReduceSum
(
const
XTensor
&
input
,
XTensor
&
output
,
int
dim
,
const
XTensor
&
shift
,
DTYPE
power
=
(
DTYPE
)
1
.
0
F
,
bool
isExp
=
false
);
/*
sum the items along a dimension of the tensor (return an XTensor structure)
...
...
@@ -61,7 +61,7 @@ For a 1-dimensional data array a,
sum = \sum_i (a_i - shift) if isExp == false
sum = \sum_i exp(a_i - shift) if isExp == true
*/
void
ReduceSum
(
const
XTensor
&
input
,
XTensor
&
output
,
int
dim
,
DTYPE
power
=
(
DTYPE
)
1
.
0
F
,
bool
isExp
=
false
,
bool
requireLink
=
false
);
void
ReduceSum
(
const
XTensor
&
input
,
XTensor
&
output
,
int
dim
,
DTYPE
power
=
(
DTYPE
)
1
.
0
F
,
bool
isExp
=
false
);
}
// namespace nts(NiuTrans.Tensor)
...
...
source/tensor/core/reduce/ReduceSumSquared.cpp
查看文件 @
04f129fc
...
...
@@ -91,9 +91,8 @@ For a 1-dimensional data array a, sum = \sum_i (a_i - shift)^2
>> output - the output tensor
>> dim - the dimension where the reduction is performed on
>> shift - bias on the input
>> requireLink - whether to add the operation to the computation network
*/
void
ReduceSumSquared
(
const
XTensor
&
input
,
XTensor
&
output
,
int
dim
,
const
XTensor
&
shift
,
bool
requireLink
)
void
ReduceSumSquared
(
const
XTensor
&
input
,
XTensor
&
output
,
int
dim
,
const
XTensor
&
shift
)
{
CheckNTErrors
(
dim
>=
0
&&
dim
<
input
.
order
,
"Illegal dimension to reduce!"
);
...
...
source/tensor/core/reduce/ReduceSumSquared.h
查看文件 @
04f129fc
...
...
@@ -45,7 +45,7 @@ squared sum of the items along a dimension of the tensor
For a 1-dimensional data array a,
sum = \sum_i (a_i - shift)^2
*/
void
ReduceSumSquared
(
const
XTensor
&
input
,
XTensor
&
output
,
int
dim
,
const
XTensor
&
shift
,
bool
requireLink
=
false
);
void
ReduceSumSquared
(
const
XTensor
&
input
,
XTensor
&
output
,
int
dim
,
const
XTensor
&
shift
);
}
// namespace nts(NiuTrans.Tensor)
...
...
source/tensor/core/reduce/ReduceVariance.cpp
查看文件 @
04f129fc
...
...
@@ -94,9 +94,8 @@ For a 1-dimensional data array a, variance = 1/n * \sum_i (a_i - mean)^2
>> output - the output tensor
>> dim - the dimension where the reduction is performed on
>> mean - the mean value
>> requireLink - whether to add the operation to the computation network
*/
void
ReduceVariance
(
const
XTensor
&
input
,
XTensor
&
output
,
int
dim
,
const
XTensor
&
mean
,
bool
requireLink
)
void
ReduceVariance
(
const
XTensor
&
input
,
XTensor
&
output
,
int
dim
,
const
XTensor
&
mean
)
{
CheckNTErrors
(
dim
>=
0
&&
dim
<
input
.
order
,
"Illegal dimension to reduce!"
);
...
...
source/tensor/core/reduce/ReduceVariance.h
查看文件 @
04f129fc
...
...
@@ -43,7 +43,7 @@ XTensor ReduceVariance(const XTensor &input, int dim, const XTensor &mean);
variance of the items along a dimension of the tensor
For a 1-dimensional data array a, variance = 1/n * \sum_i (a_i - mean)^2
*/
void
ReduceVariance
(
const
XTensor
&
input
,
XTensor
&
output
,
int
dim
,
const
XTensor
&
mean
,
bool
requireLink
=
false
);
void
ReduceVariance
(
const
XTensor
&
input
,
XTensor
&
output
,
int
dim
,
const
XTensor
&
mean
);
}
// namespace nts(NiuTrans.Tensor)
...
...
source/tensor/core/shape/Merge.cpp
查看文件 @
04f129fc
...
...
@@ -232,7 +232,7 @@ XTensor Merge(const XTensor &s, int whereToMerge, int leadingDim)
return
t
;
}
void
Merge
(
const
XTensor
&
s
,
XTensor
&
t
,
int
whereToMerge
,
int
leadingDim
,
bool
requireLink
)
void
Merge
(
const
XTensor
&
s
,
XTensor
&
t
,
int
whereToMerge
,
int
leadingDim
)
{
if
(
!
t
.
isInit
||
!
CheckMergeSize
(
&
s
,
&
t
,
whereToMerge
,
leadingDim
))
{
if
(
leadingDim
<
0
)
...
...
source/tensor/core/shape/Merge.h
查看文件 @
04f129fc
...
...
@@ -33,7 +33,7 @@ void _Merge(const XTensor * s, XTensor * t, int whereToMerge, int leadingDim = -
e.g., (M, N/3, 3) -> (M, N) */
XTensor
Merge
(
const
XTensor
&
s
,
int
whereToMerge
,
int
leadingDim
=
-
1
);
void
Merge
(
const
XTensor
&
s
,
XTensor
&
t
,
int
whereToMerge
,
int
leadingDim
=
-
1
,
bool
requireLink
=
false
);
void
Merge
(
const
XTensor
&
s
,
XTensor
&
t
,
int
whereToMerge
,
int
leadingDim
=
-
1
);
/* merge small tensors into a big tensor */
void
_Merge
(
const
TensorList
*
smalls
,
XTensor
*
big
,
int
whereToMerge
);
...
...
source/tensor/core/shape/Reshape.cpp
查看文件 @
04f129fc
...
...
@@ -48,7 +48,7 @@ XTensor Reshape(XTensor &s, int order, int * dimSize)
return
t
;
}
void
Reshape
(
XTensor
&
s
,
XTensor
&
t
,
int
order
,
int
*
dimSize
,
bool
requireLink
)
void
Reshape
(
XTensor
&
s
,
XTensor
&
t
,
int
order
,
int
*
dimSize
)
{
if
(
!
t
.
isInit
||
!
XTensor
::
IsSameShaped
(
&
t
,
&
s
))
{
InitTensor
(
&
t
,
&
s
);
...
...
source/tensor/core/shape/Reshape.h
查看文件 @
04f129fc
...
...
@@ -29,7 +29,7 @@ namespace nts { // namespace nts(NiuTrans.Tensor)
/* reshape the tensor */
XTensor
Reshape
(
XTensor
&
s
,
int
order
,
int
*
dimSize
);
void
Reshape
(
XTensor
&
s
,
XTensor
&
t
,
int
order
,
int
*
dimSize
,
bool
requireLink
=
false
);
void
Reshape
(
XTensor
&
s
,
XTensor
&
t
,
int
order
,
int
*
dimSize
);
}
// namespace nts(NiuTrans.Tensor)
#endif // __RESHAPE_H__
source/tensor/core/shape/Split.cpp
查看文件 @
04f129fc
...
...
@@ -227,7 +227,7 @@ XTensor Split(const XTensor &s, int whereToSplit, int splitNum)
return
t
;
}
void
Split
(
const
XTensor
&
s
,
XTensor
&
t
,
int
whereToSplit
,
int
splitNum
,
bool
requireLink
)
void
Split
(
const
XTensor
&
s
,
XTensor
&
t
,
int
whereToSplit
,
int
splitNum
)
{
if
(
!
t
.
isInit
||
!
CheckSplitSize
(
&
s
,
&
t
,
whereToSplit
,
splitNum
))
{
int
order
=
s
.
order
+
1
;
...
...
source/tensor/core/shape/Split.h
查看文件 @
04f129fc
...
...
@@ -41,7 +41,7 @@ e.g., (M, N) -> (M, N/3, 3)
*/
XTensor
Split
(
const
XTensor
&
s
,
int
whereToSplit
,
int
splitNum
);
void
Split
(
const
XTensor
&
s
,
XTensor
&
t
,
int
whereToSplit
,
int
splitNum
,
bool
requireLink
=
false
);
void
Split
(
const
XTensor
&
s
,
XTensor
&
t
,
int
whereToSplit
,
int
splitNum
);
/* split a big tensor into small tensors */
void
_Split
(
const
XTensor
*
big
,
TensorList
*
smalls
,
int
whereToSplit
,
int
splitNum
);
...
...
source/tensor/core/shape/Squeeze.cpp
查看文件 @
04f129fc
...
...
@@ -112,7 +112,7 @@ XTensor Squeeze(XTensor & source, int leadingDim)
return
target
;
}
void
Squeeze
(
XTensor
&
source
,
XTensor
&
target
,
int
leadingDim
,
bool
requireLink
)
void
Squeeze
(
XTensor
&
source
,
XTensor
&
target
,
int
leadingDim
)
{
if
(
!
target
.
isInit
||
!
XTensor
::
IsSameShaped
(
&
source
,
&
target
))
{
InitTensor
(
&
target
,
&
source
);
...
...
source/tensor/core/shape/Squeeze.h
查看文件 @
04f129fc
...
...
@@ -37,7 +37,7 @@ void _SqueezeMe(XTensor * source, int leadingDim = -1);
make a new tensor to keep the result and return it */
XTensor
Squeeze
(
XTensor
&
source
,
int
leadingDim
=
-
1
);
void
Squeeze
(
XTensor
&
source
,
XTensor
&
target
,
int
leadingDim
=
-
1
,
bool
requireLink
=
false
);
void
Squeeze
(
XTensor
&
source
,
XTensor
&
target
,
int
leadingDim
=
-
1
);
}
// namespace nts(NiuTrans.Tensor)
...
...
source/tensor/core/shape/Unsqueeze.cpp
查看文件 @
04f129fc
...
...
@@ -166,7 +166,7 @@ XTensor Unsqueeze(const XTensor &a, int dim, int dSize)
return
b
;
}
void
Unsqueeze
(
const
XTensor
&
a
,
XTensor
&
b
,
int
dim
,
int
dSize
,
bool
requireLink
)
void
Unsqueeze
(
const
XTensor
&
a
,
XTensor
&
b
,
int
dim
,
int
dSize
)
{
if
(
!
b
.
isInit
||
!
CheckUnsqueezeSize
(
&
a
,
&
b
,
dim
,
dSize
))
{
int
order
=
a
.
order
+
1
;
...
...
source/tensor/core/shape/Unsqueeze.h
查看文件 @
04f129fc
...
...
@@ -35,7 +35,7 @@ void _Unsqueeze(const XTensor * a, XTensor * b, int dim, int dSize);
make a new tensor to keep the result and return it */
XTensor
Unsqueeze
(
const
XTensor
&
a
,
int
dim
,
int
dSize
);
void
Unsqueeze
(
const
XTensor
&
a
,
XTensor
&
b
,
int
dim
,
int
dSize
,
bool
requireLink
=
false
);
void
Unsqueeze
(
const
XTensor
&
a
,
XTensor
&
b
,
int
dim
,
int
dSize
);
}
// namespace nts(NiuTrans.Tensor)
...
...
source/tensor/function/HardTanH.cpp
查看文件 @
04f129fc
...
...
@@ -84,7 +84,7 @@ XTensor HardTanH(const XTensor &x)
return
y
;
}
void
HardTanH
(
const
XTensor
&
x
,
XTensor
&
y
,
bool
requireLink
)
void
HardTanH
(
const
XTensor
&
x
,
XTensor
&
y
)
{
if
(
!
y
.
isInit
||
!
XTensor
::
IsSameShaped
(
&
y
,
&
x
))
{
InitTensor
(
&
y
,
&
x
);
...
...
source/tensor/function/HardTanH.h
查看文件 @
04f129fc
...
...
@@ -40,7 +40,7 @@ void _HardTanH(const XTensor * x, XTensor * y);
/* hard tanh function (return an XTensor structure) */
XTensor
HardTanH
(
const
XTensor
&
x
);
void
HardTanH
(
const
XTensor
&
x
,
XTensor
&
y
,
bool
requireLink
=
false
);
void
HardTanH
(
const
XTensor
&
x
,
XTensor
&
y
);
/* de/dx */
void
_HardTanHBackward
(
XTensor
*
gold
,
XTensor
*
y
,
XTensor
*
x
,
...
...
source/tensor/function/Identity.cpp
查看文件 @
04f129fc
...
...
@@ -58,7 +58,7 @@ XTensor Identity(const XTensor &x)
return
y
;
}
void
Identity
(
const
XTensor
&
x
,
XTensor
&
y
,
bool
requireLink
)
void
Identity
(
const
XTensor
&
x
,
XTensor
&
y
)
{
if
(
!
y
.
isInit
||
!
y
.
IsSameShaped
(
&
y
,
&
x
))
{
InitTensor
(
&
y
,
&
x
);
...
...
source/tensor/function/Identity.h
查看文件 @
04f129fc
...
...
@@ -33,7 +33,7 @@ void _Identity(const XTensor * x, XTensor * y);
/* identity function y = x (return an XTensor structure) */
XTensor
Identity
(
const
XTensor
&
x
);
void
Identity
(
const
XTensor
&
x
,
XTensor
&
y
,
bool
requireLink
=
false
);
void
Identity
(
const
XTensor
&
x
,
XTensor
&
y
);
/* de/dx */
void
_IdentityBackward
(
XTensor
*
gold
,
XTensor
*
y
,
XTensor
*
x
,
...
...
source/tensor/function/LogSoftmax.cpp
查看文件 @
04f129fc
...
...
@@ -194,7 +194,15 @@ XTensor LogSoftmax(const XTensor &x, int leadDim)
return
y
;
}
void
LogSoftmax
(
const
XTensor
&
x
,
XTensor
&
y
,
int
leadDim
,
bool
requireLink
)
/*
log scale softmax y = log(e^x / \sum_{i} e^{x_i})
make a new tensor to keep the result and return it
>> x - input vector
>> y - output vector
>> leadDim - leading dimension (along which we perform reduction)
*/
void
LogSoftmax
(
const
XTensor
&
x
,
XTensor
&
y
,
int
leadDim
)
{
int
ld
=
leadDim
;
if
(
ld
<
0
)
...
...
@@ -213,26 +221,7 @@ void LogSoftmax(const XTensor &x, XTensor &y, int leadDim, bool requireLink)
XLink
::
AddParamToHeadInt
(
&
y
,
ld
);
}
}
/*
log scale softmax y = log(e^x / \sum_{i} e^{x_i})
The result is written into the output tensor y (nothing is returned);
y is (re)initialized from x when their shapes differ.
>> x - input vector
>> y - output vector, overwritten with the result
>> leadDim - leading dimension (along which we perform reduction)
   NOTE(review): unlike the overload above, a negative leadDim is passed
   through unnormalized here - confirm this is intended
*/
void LogSoftmax(const XTensor &x, XTensor &y, int leadDim)
{
    /* make sure y has the same shape as x before computing */
    if (!XTensor::IsSameShaped(&x, &y))
        InitTensor(&y, &x);

    /* call _LogSoftmax function to do the actual computation */
    _LogSoftmax(&x, &y, leadDim);

    /* tensor connection: link x -> y so the operation is recorded
       in the network, and keep leadDim for backward computation */
    XLink::MakeLink(&x, NULL, &y, FUNC_LOGSOFTMAX);
    XLink::AddParamToHeadInt(&y, leadDim);
}
/*
backward computation for dense matrices with default data type
...
...
source/tensor/function/LogSoftmax.h
查看文件 @
04f129fc
...
...
@@ -33,7 +33,7 @@ void _LogSoftmax(const XTensor * x, XTensor * y, int leadDim);
/* log scale softmax y = log(e^x / \sum_{i} e^{x_i}) (return an XTensor structure) */
XTensor
LogSoftmax
(
const
XTensor
&
x
,
int
leadDim
);
void
LogSoftmax
(
const
XTensor
&
x
,
XTensor
&
y
,
int
leadDim
,
bool
requireLink
=
false
);
void
LogSoftmax
(
const
XTensor
&
x
,
XTensor
&
y
,
int
leadDim
);
/* log scale softmax y = log(e^x / \sum_{i} e^{x_i}) (with both arguments x and y) */
void
LogSoftmax
(
const
XTensor
&
x
,
XTensor
&
y
,
int
leadDim
);
...
...
source/tensor/function/Rectify.cpp
查看文件 @
04f129fc
...
...
@@ -77,7 +77,7 @@ XTensor Rectify(const XTensor &x)
return
y
;
}
void
Rectify
(
const
XTensor
&
x
,
XTensor
&
y
,
bool
requireLink
)
void
Rectify
(
const
XTensor
&
x
,
XTensor
&
y
)
{
if
(
!
y
.
isInit
||
!
XTensor
::
IsSameShaped
(
&
y
,
&
x
))
{
InitTensor
(
&
y
,
&
x
);
...
...
source/tensor/function/Rectify.h
查看文件 @
04f129fc
...
...
@@ -33,7 +33,7 @@ void _Rectify(const XTensor * x, XTensor * y);
/* rectify function y = max(0, x) (return an XTensor structure) */
XTensor
Rectify
(
const
XTensor
&
x
);
void
Rectify
(
const
XTensor
&
x
,
XTensor
&
y
,
bool
requireLink
=
false
);
void
Rectify
(
const
XTensor
&
x
,
XTensor
&
y
);
/* de/dx */
void
_RectifyBackward
(
XTensor
*
gold
,
XTensor
*
y
,
XTensor
*
x
,
...
...
source/tensor/function/Sigmoid.cpp
查看文件 @
04f129fc
...
...
@@ -75,7 +75,7 @@ XTensor Sigmoid(const XTensor &x)
return
y
;
}
void
Sigmoid
(
const
XTensor
&
x
,
XTensor
&
y
,
bool
requireLink
)
void
Sigmoid
(
const
XTensor
&
x
,
XTensor
&
y
)
{
if
(
!
y
.
isInit
||
!
XTensor
::
IsSameShaped
(
&
y
,
&
x
))
{
InitTensor
(
&
y
,
&
x
);
...
...
source/tensor/function/Sigmoid.h
查看文件 @
04f129fc
...
...
@@ -33,7 +33,7 @@ void _Sigmoid(const XTensor * x, XTensor * y);
/* sigmoid function y = 1/(1+exp(-x)) (return an XTensor structure) */
XTensor
Sigmoid
(
const
XTensor
&
x
);
void
Sigmoid
(
const
XTensor
&
x
,
XTensor
&
y
,
bool
requireLink
=
false
);
void
Sigmoid
(
const
XTensor
&
x
,
XTensor
&
y
);
/* de/dx */
void
_SigmoidBackward
(
XTensor
*
gold
,
XTensor
*
y
,
XTensor
*
x
,
...
...
source/tensor/function/Softmax.cpp
查看文件 @
04f129fc
...
...
@@ -148,7 +148,7 @@ XTensor Softmax(const XTensor &x, int leadDim)
return
y
;
}
void
Softmax
(
const
XTensor
&
x
,
XTensor
&
y
,
int
leadDim
,
bool
requireLink
)
void
Softmax
(
const
XTensor
&
x
,
XTensor
&
y
,
int
leadDim
)
{
int
ld
=
leadDim
;
if
(
ld
<
0
)
...
...
source/tensor/function/Softmax.h
查看文件 @
04f129fc
...
...
@@ -33,7 +33,7 @@ void _Softmax(const XTensor * x, XTensor * y, int leadDim);
/* softmax y = e^x / \sum_{i} e^{x_i} (return an XTensor structure) */
XTensor
Softmax
(
const
XTensor
&
x
,
int
leadDim
);
void
Softmax
(
const
XTensor
&
x
,
XTensor
&
y
,
int
leadDim
,
bool
requireLink
=
false
);
void
Softmax
(
const
XTensor
&
x
,
XTensor
&
y
,
int
leadDim
);
/* de/dx */
void
_SoftmaxBackward
(
XTensor
*
gold
,
XTensor
*
y
,
XTensor
*
x
,
...
...
编写
预览
Markdown
格式
0%
重试
或
添加新文件
添加附件
取消
您添加了
0
人
到此讨论。请谨慎行事。
请先完成此评论的编辑!
取消
请
注册
或者
登录
后发表评论