NiuTrans.Tensor · Commits

Commit ad3fc86f · authored Sep 18, 2018 by xiaotong · parent 22df3e17

rename SetTMP

Showing 38 changed files, with 69 additions and 51 deletions:
source/tensor/XTensor.cpp  +16 -4
source/tensor/XTensor.h  +8 -2
source/tensor/core/arithmetic/Div.cpp  +1 -1
source/tensor/core/arithmetic/DivDim.cpp  +1 -1
source/tensor/core/arithmetic/MatrixMul.cpp  +2 -2
source/tensor/core/arithmetic/MatrixMulBatched.cpp  +2 -2
source/tensor/core/arithmetic/Multiply.cpp  +1 -1
source/tensor/core/arithmetic/MultiplyDim.cpp  +1 -1
source/tensor/core/arithmetic/Negate.cpp  +1 -1
source/tensor/core/arithmetic/Sign.cpp  +1 -1
source/tensor/core/arithmetic/Sub.cpp  +1 -1
source/tensor/core/arithmetic/SubDim.cpp  +1 -1
source/tensor/core/arithmetic/Sum.cpp  +1 -1
source/tensor/core/arithmetic/SumDim.cpp  +1 -1
source/tensor/core/getandset/Select.cpp  +1 -1
source/tensor/core/math/Clip.cpp  +1 -1
source/tensor/core/math/Normalize.cpp  +1 -1
source/tensor/core/math/Power.cpp  +1 -1
source/tensor/core/math/ScaleAndShift.cpp  +1 -1
source/tensor/core/math/Unary.cpp  +2 -2
source/tensor/core/movement/CopyIndexed.cpp  +1 -1
source/tensor/core/movement/CopyValues.cpp  +1 -1
source/tensor/core/reduce/ReduceMax.cpp  +1 -1
source/tensor/core/reduce/ReduceMean.cpp  +1 -1
source/tensor/core/reduce/ReduceSum.cpp  +2 -2
source/tensor/core/reduce/ReduceSumSquared.cpp  +1 -1
source/tensor/core/reduce/ReduceVariance.cpp  +1 -1
source/tensor/core/shape/Concatenate.cpp  +4 -4
source/tensor/core/shape/Merge.cpp  +3 -3
source/tensor/core/shape/Split.cpp  +1 -1
source/tensor/core/shape/Transpose.cpp  +1 -1
source/tensor/core/shape/Unsqueeze.cpp  +1 -1
source/tensor/function/HardTanH.cpp  +1 -1
source/tensor/function/Identity.cpp  +1 -1
source/tensor/function/LogSoftmax.cpp  +1 -1
source/tensor/function/Rectify.cpp  +1 -1
source/tensor/function/Sigmoid.cpp  +1 -1
source/tensor/function/Softmax.cpp  +1 -1

source/tensor/XTensor.cpp

@@ -202,7 +202,7 @@ XTensor::~XTensor()
     dims[0] = -dims[0];
     XTensor * newTensor = new XTensor(order, dims, dataType, denseRatio, devID, mem);
-    newTensor->SetTMP();
+    newTensor->SetTMPFlag();
     newTensor->data = data;
     data = NULL;

@@ -244,6 +244,7 @@ void XTensor::Init()
     isInit = false;
     isTmp = false;
     isGrad = false;
+    isVar = false;
     visitMark = 0;
     grad = NULL;
 }

@@ -297,7 +298,7 @@ XTensor& XTensor::operator= (const XTensor& tensor)
     dims[0] = -dims[0];
     XTensor * newTensor = new XTensor(order, dims, dataType, denseRatio, devID, mem);
-    newTensor->SetTMP();
+    newTensor->SetTMPFlag();
     newTensor->data = data;
     newTensor->dataHost = dataHost;
     newTensor->signature = tensor.signature;

@@ -1125,7 +1126,7 @@ int XTensor::GetNonzeroSize()
 set the tensor as "temporary"
 >> myIsTMP - the flag
 */
-void XTensor::SetTMP(bool myIsTmp)
+void XTensor::SetTMPFlag(bool myIsTmp)
 {
     isTmp = myIsTmp;
 }

@@ -1134,12 +1135,23 @@ void XTensor::SetTMP(bool myIsTmp)
 set the tensor as "keep-gradient"
 >> myIsGrad - the flag
 */
-void XTensor::SetGrad(bool myIsGrad)
+void XTensor::SetGradFlag(bool myIsGrad)
 {
     isGrad = myIsGrad;
 }

 /*
+set the tensor as "variable"
+>> myIsVar - the flag
+*/
+void XTensor::SetVarFlag(bool myIsVar)
+{
+    isVar = myIsVar;
+    if(isVar)
+        SetGradFlag(true);
+}
+
+/*
 resize a tensor with a specified tensor size
 >> myOrder - order of the tensor
 >> myDimSize - the size of each dimension

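In short, XTensor.cpp renames XTensor::SetTMP to SetTMPFlag and XTensor::SetGrad to SetGradFlag, initializes the new isVar member in Init(), and adds SetVarFlag, which marks a tensor as a variable and also switches on gradient keeping. A minimal C++ sketch of the resulting semantics (the tensor w is illustrative; only the flag setters come from this diff):

    XTensor w;              /* an illustrative tensor                             */
    w.SetTMPFlag(false);    /* formerly SetTMP(): w is not a temporary            */
    w.SetVarFlag();         /* new in this commit: sets isVar = true and, because */
                            /* isVar is now true, also calls SetGradFlag(true)    */
    /* after these calls: w.isTmp == false, w.isVar == true, w.isGrad == true     */
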
source/tensor/XTensor.h

@@ -145,6 +145,9 @@ public:
     /* indicates whether the tensor keeps the gradient when used as model parameters */
     bool isGrad;

+    /* indicates whether the tensor is used as paramters (or variables) */
+    bool isVar;
+
     /* mark for traversing the gragh */
     unsigned int visitMark;

@@ -319,10 +322,13 @@ public:
     int GetNonzeroSize();

     /* set the tensor as "temporary" */
-    void SetTMP(bool myIsTmp = true);
+    void SetTMPFlag(bool myIsTmp = true);

     /* set the tensor as "keep-gradient" */
-    void SetGrad(bool myIsGrad = true);
+    void SetGradFlag(bool myIsGrad = true);
+
+    /* set the tensor as "variable" */
+    void SetVarFlag(bool myIsVar = true);

     /* resize a matrix with a specified matrix size */
     bool Resize(const int myOrder, const int * myDimSize,

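The header declares the renamed setters together with the new isVar member and SetVarFlag; all three setters default their flag argument to true. Call sites migrate as in the hedged sketch below (c and p are illustrative tensors, not code from the repository):

    XTensor c, p;
    c.SetTMPFlag();         /* was c.SetTMP() before this commit               */
    p.SetGradFlag(true);    /* was p.SetGrad(true)                             */
    p.SetVarFlag();         /* new: marks p as a parameter (variable) and, per */
                            /* XTensor.cpp above, also enables gradient keeping */
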
source/tensor/core/arithmetic/Div.cpp

@@ -181,7 +181,7 @@ where i is the index of the item
 XTensor Div(const XTensor &a, const XTensor &b, DTYPE alpha, int leadingDim)
 {
     XTensor c(&a);
-    c.SetTMP();
+    c.SetTMPFlag();

     int n = GetDivDimIndex(a, b);

source/tensor/core/arithmetic/DivDim.cpp

@@ -150,7 +150,7 @@ i.e., a is divided with b by broadcasting
 XTensor DivDim(const XTensor &a, const XTensor &b, int n, DTYPE alpha)
 {
     XTensor c(&a);
-    c.SetTMP();
+    c.SetTMPFlag();

     /* call _Div function */
     _DivDim(&a, &b, &c, n, alpha);

source/tensor/core/arithmetic/MatrixMul.cpp

@@ -249,7 +249,7 @@ XTensor MatrixMul(const XTensor &a, MATRIX_TRANS_TYPE transposedA,
     float dr = (!a.isSparse || !b.isSparse) ? 1.0F : MAX(a.denseRatio, b.denseRatio);
     XTensor c(order, dimSize, a.dataType, dr, a.devID, a.mem);
-    c.SetTMP();
+    c.SetTMPFlag();

     /* call _MatrixMul function */
     _MatrixMul(&a, transposedA, &b, transposedB, &c, alpha, 0, parallelRunner);

@@ -299,7 +299,7 @@ XTensor MatrixMul(const XTensor &a, const XTensor &b,
     float dr = (!a.isSparse || !b.isSparse) ? 1.0F : MAX(a.denseRatio, b.denseRatio);
     XTensor c(order, dimSize, a.dataType, dr, a.devID, a.mem);
-    c.SetTMP();
+    c.SetTMPFlag();

     /* call _MatrixMul function */
     _MatrixMul(&a, X_NOTRANS, &b, X_NOTRANS, &c, alpha, 0, parallelRunner);

source/tensor/core/arithmetic/MatrixMulBatched.cpp

@@ -314,7 +314,7 @@ XTensor MatrixMulBatched(const XTensor &a, MATRIX_TRANS_TYPE transposedA, const
     float dr = (!a.isSparse || !b.isSparse) ? 1.0F : MAX(a.denseRatio, b.denseRatio);
     XTensor c(order, dimSize, a.dataType, dr, a.devID, a.mem);
-    c.SetTMP();
+    c.SetTMPFlag();

     /*call _MatrixMulBatched function */
     _MatrixMulBatched(&a, transposedA, &b, transposedB, &c, alpha, 0, parallelRunner);

@@ -370,7 +370,7 @@ XTensor MatrixMulBatched(const XTensor &a, const XTensor &b,
     float dr = (!a.isSparse || !b.isSparse) ? 1.0F : MAX(a.denseRatio, b.denseRatio);
     XTensor c(order, dimSize, a.dataType, dr, a.devID, a.mem);
-    c.SetTMP();
+    c.SetTMPFlag();

     /*call _MatrixMulBatched function */
     _MatrixMulBatched(&a, X_NOTRANS, &b, X_NOTRANS, &c, alpha, 0, parallelRunner);

source/tensor/core/arithmetic/Multiply.cpp

@@ -182,7 +182,7 @@ XTensor Multiply(const XTensor &a, const XTensor &b, DTYPE alpha, int leadingDim
 {
     XTensor c(&a);
-    c.SetTMP();
+    c.SetTMPFlag();

     int n = GetMultiplyDimIndex(a, b);

source/tensor/core/arithmetic/MultiplyDim.cpp

@@ -148,7 +148,7 @@ i.e., a is multiplied with b by broadcasting
 XTensor MultiplyDim(const XTensor &a, const XTensor &b, int n, DTYPE alpha)
 {
     XTensor c(&a);
-    c.SetTMP();
+    c.SetTMPFlag();

     /* call _Multiply function */
     _MultiplyDim(&a, &b, &c, n, alpha);

source/tensor/core/arithmetic/Negate.cpp

@@ -68,7 +68,7 @@ make a new tensor to keep the result and return it
 XTensor Negate(const XTensor &a)
 {
     XTensor b(&a);
-    b.SetTMP();
+    b.SetTMPFlag();

     /* call _Negate function */
     _Negate(&a, &b);

source/tensor/core/arithmetic/Sign.cpp

@@ -74,7 +74,7 @@ make a new tensor to keep the result and return it
 XTensor Sign(const XTensor &a)
 {
     XTensor b(&a);
-    b.SetTMP();
+    b.SetTMPFlag();

     /* call _Sign function */
     _Sign(&a, &b);

source/tensor/core/arithmetic/Sub.cpp

@@ -164,7 +164,7 @@ make a new tensor c to keep the result and return it
 XTensor Sub(const XTensor &a, const XTensor &b, DTYPE beta)
 {
     XTensor c(&a);
-    c.SetTMP();
+    c.SetTMPFlag();

     int n = GetSubDimIndex(a, b);

source/tensor/core/arithmetic/SubDim.cpp

@@ -150,7 +150,7 @@ i.e., a is subtracted with b by broadcasting
 XTensor SubDim(const XTensor &a, const XTensor &b, int n, DTYPE beta)
 {
     XTensor c(&a);
-    c.SetTMP();
+    c.SetTMPFlag();

     /* call _Sub function */
     _SubDim(&a, &b, &c, n, beta);

source/tensor/core/arithmetic/Sum.cpp

@@ -169,7 +169,7 @@ make a new tensor c to keep the result and return it
 XTensor Sum(const XTensor &a, const XTensor &b, DTYPE beta)
 {
     XTensor c(&a);
-    c.SetTMP();
+    c.SetTMPFlag();

     int n = GetSumDimIndex(a, b);

source/tensor/core/arithmetic/SumDim.cpp

@@ -150,7 +150,7 @@ i.e., a is summed with b by broadcasting
 XTensor SumDim(const XTensor &a, const XTensor &b, int n, DTYPE beta)
 {
     XTensor c(&a);
-    c.SetTMP();
+    c.SetTMPFlag();

     /* call _Sum function */
     _SumDim(&a, &b, &c, n, beta);

source/tensor/core/getandset/Select.cpp

@@ -111,7 +111,7 @@ XTensor SelectRange(const XTensor &a, int dim, int low, int high)
     float dr = (!a.isSparse) ? 1.0F : a.denseRatio;
     XTensor c(order, dimSize, a.dataType, dr, a.devID, a.mem);
-    c.SetTMP();
+    c.SetTMPFlag();

     /* call _SelectRange function */
     _SelectRange(&a, &c, dim, low, high);

source/tensor/core/math/Clip.cpp

@@ -81,7 +81,7 @@ make a new tensor to keep the result and return it
 XTensor Clip(const XTensor &a, DTYPE lower, DTYPE upper)
 {
     XTensor b(&a);
-    b.SetTMP();
+    b.SetTMPFlag();

     /* call _Clip function */
     _Clip(&a, &b, lower, upper);

source/tensor/core/math/Normalize.cpp

@@ -132,7 +132,7 @@ where a and b are the scalar and bias respectively, and \epsilon is the adjustme
 XTensor Normalize(const XTensor &input, int dim, const XTensor &mean, const XTensor &var, const XTensor &a, const XTensor &b, DTYPE epsilon)
 {
     XTensor output(&input);
-    output.SetTMP();
+    output.SetTMPFlag();

     /* call _Normalize function */
     _Normalize(&input, &output, dim, &mean, &var, &a, &b, epsilon);

source/tensor/core/math/Power.cpp

@@ -90,7 +90,7 @@ make a new tensor to keep the result and return it
 XTensor Power(const XTensor &a, DTYPE p)
 {
     XTensor b(&a);
-    b.SetTMP();
+    b.SetTMPFlag();

     /* call _Power function */
     _Power(&a, &b, p);

source/tensor/core/math/ScaleAndShift.cpp

@@ -105,7 +105,7 @@ b = a * scale + shift
 XTensor ScaleAndShift(const XTensor &a, DTYPE scale, DTYPE shift)
 {
     XTensor b(&a);
-    b.SetTMP();
+    b.SetTMPFlag();

     /* call _ScaleAndShift function */
     _ScaleAndShift(&a, &b, scale, shift);

source/tensor/core/math/Unary.cpp

@@ -65,7 +65,7 @@ void _funcNameMe(XTensor * a) \
 XTensor funcName(const XTensor &a) \
 { \
     XTensor b(&a); \
-    b.SetTMP(); \
+    b.SetTMPFlag(); \
     _funcName(&a, &b); \
     XLink::MakeLink(&a, NULL, &b, operationId); \
     return b; \

@@ -140,7 +140,7 @@ void _funcNameMe(XTensor * a) \
 XTensor funcName(const XTensor &a) \
 { \
     XTensor b(&a); \
-    b.SetTMP(); \
+    b.SetTMPFlag(); \
     _funcName(&a, &b); \
     XLink::MakeLink(&a, NULL, &b, operationId); \
     return b; \

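The two hunks above patch the body of a code-generating macro, so every unary wrapper produced by it picks up the rename at once. A hedged sketch of what one generated wrapper looks like after this commit (Foo, _Foo and FOO_OP are placeholder names, not identifiers from the repository):

    /* sketch only: assumes the NiuTrans.Tensor headers (XTensor.h, XLink.h) are in scope */
    XTensor Foo(const XTensor &a)
    {
        XTensor b(&a);                          /* result tensor shaped like a          */
        b.SetTMPFlag();                         /* renamed from SetTMP() in this commit */
        _Foo(&a, &b);                           /* the in-place kernel does the work    */
        XLink::MakeLink(&a, NULL, &b, FOO_OP);  /* record the op for backpropagation    */
        return b;
    }
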
source/tensor/core/movement/CopyIndexed.cpp

@@ -130,7 +130,7 @@ XTensor CopyIndexed(const XTensor &s, int dim, int * srcIndex, int indexSize, in
     float dr = (!s.isSparse) ? 1.0F : s.denseRatio;
     XTensor t(order, dimSize, s.dataType, dr, s.devID, s.mem);
-    t.SetTMP();
+    t.SetTMPFlag();

     /* call _CopyIndexed function */
     _CopyIndexed(&s, &t, dim, srcIndex, indexSize, tgtIndex, copyNum);

source/tensor/core/movement/CopyValues.cpp

@@ -108,7 +108,7 @@ make a new tensor to keep the result and return it
 XTensor CopyValues(const XTensor &s, XStream * stream)
 {
     XTensor t(&s);
-    t.SetTMP();
+    t.SetTMPFlag();

     /* call _CopyValues function */
     _CopyValues(&s, &t, stream);

source/tensor/core/reduce/ReduceMax.cpp

@@ -114,7 +114,7 @@ XTensor ReduceMax(const XTensor &input, int dim)
     float dr = (!input.isSparse) ? 1.0F : input.denseRatio;
     XTensor output(order, dimSize, input.dataType, dr, input.devID, input.mem);
-    output.SetTMP();
+    output.SetTMPFlag();

     /* call _ReduceMax function */
     _ReduceMax(&input, &output, dim);

source/tensor/core/reduce/ReduceMean.cpp

@@ -71,7 +71,7 @@ XTensor ReduceMean(const XTensor &input, int dim)
     float dr = (!input.isSparse) ? 1.0F : input.denseRatio;
     XTensor output(order, dimSize, input.dataType, dr, input.devID, input.mem);
-    output.SetTMP();
+    output.SetTMPFlag();

     /* call _ReduceMean function */
     _ReduceMean(&input, &output, dim);

source/tensor/core/reduce/ReduceSum.cpp

@@ -225,7 +225,7 @@ XTensor ReduceSum(const XTensor &input, int dim, const XTensor &shift, DTYPE pow
     float dr = (!input.isSparse) ? 1.0F : input.denseRatio;
     XTensor output(order, dimSize, input.dataType, dr, input.devID, input.mem);
-    output.SetTMP();
+    output.SetTMPFlag();

     /* call _ReduceSum function */
     _ReduceSum(&input, &output, dim, &shift, power, isExp);

@@ -271,7 +271,7 @@ XTensor ReduceSum(const XTensor &input, int dim, DTYPE power, bool isExp)
     float dr = (!input.isSparse) ? 1.0F : input.denseRatio;
     XTensor output(order, dimSize, input.dataType, dr, input.devID, input.mem);
-    output.SetTMP();
+    output.SetTMPFlag();

     /* call _ReduceSum function */
     _ReduceSum(&input, &output, dim, NULL, power, isExp);

source/tensor/core/reduce/ReduceSumSquared.cpp

@@ -67,7 +67,7 @@ XTensor ReduceSumSquared(const XTensor &input, int dim, const XTensor &shift)
     float dr = (!input.isSparse) ? 1.0F : input.denseRatio;
     XTensor output(order, dimSize, input.dataType, dr, input.devID, input.mem);
-    output.SetTMP();
+    output.SetTMPFlag();

     /* call _ReduceSumSquared function */
     _ReduceSumSquared(&input, &output, dim, &shift);

source/tensor/core/reduce/ReduceVariance.cpp

@@ -70,7 +70,7 @@ XTensor ReduceVariance(const XTensor &input, int dim, const XTensor &mean)
     float dr = (!input.isSparse) ? 1.0F : input.denseRatio;
     XTensor output(order, dimSize, input.dataType, dr, input.devID, input.mem);
-    output.SetTMP();
+    output.SetTMPFlag();

     /* call _ReduceVariance function */
     _ReduceVariance(&input, &output, dim, &mean);

source/tensor/core/shape/Concatenate.cpp

@@ -93,7 +93,7 @@ XTensor Concatenate(const XList &smalls, int dim)
     float dr = (!tensor->isSparse) ? 1.0F : tensor->denseRatio;
     XTensor big(order, dimSize, tensor->dataType, dr, tensor->devID, tensor->mem);
-    big.SetTMP();
+    big.SetTMPFlag();

     /* call _Merge function */
     _Merge(&smalls, &big, dim);

@@ -121,7 +121,7 @@ XTensor Concatenate(const XList &smalls, int dim)
     float dr = (!tensor->isSparse) ? 1.0F : tensor->denseRatio;
     XTensor big(order, dimSize, tensor->dataType, dr, tensor->devID, tensor->mem);
-    big.SetTMP();
+    big.SetTMPFlag();

     /* call _ConcatenateSolely function */
     _ConcatenateSolely(&smalls, &big, dim);

@@ -194,7 +194,7 @@ XTensor Concatenate(const XTensor &smallA, const XTensor &smallB, int dim)
     float dr = (!tensor->isSparse) ? 1.0F : tensor->denseRatio;
     XTensor big(order, dimSize, tensor->dataType, dr, tensor->devID, tensor->mem);
-    big.SetTMP();
+    big.SetTMPFlag();

     /* call _Merge function */
     _Merge(&smalls, &big, dim);

@@ -222,7 +222,7 @@ XTensor Concatenate(const XTensor &smallA, const XTensor &smallB, int dim)
     float dr = (!tensor->isSparse) ? 1.0F : tensor->denseRatio;
     XTensor big(order, dimSize, tensor->dataType, dr, tensor->devID, tensor->mem);
-    big.SetTMP();
+    big.SetTMPFlag();

     /* call _ConcatenateSolely function */
     _ConcatenateSolely(&smalls, &big, dim);

source/tensor/core/shape/Merge.cpp

@@ -183,7 +183,7 @@ XTensor Merge(const XTensor &s, int whereToMerge, int leadingDim)
     float dr = (!s.isSparse) ? 1.0F : s.denseRatio;
     XTensor t(order, dimSize, s.dataType, dr, s.devID, s.mem);
-    t.SetTMP();
+    t.SetTMPFlag();

     /* call _Merge function */
     _Merge(&s, &t, whereToMerge, leadingDim);

@@ -334,7 +334,7 @@ XTensor Merge(const XList &smalls, int whereToMerge)
     float dr = (!tensor->isSparse) ? 1.0F : tensor->denseRatio;
     XTensor big(order, dimSize, tensor->dataType, dr, tensor->devID, tensor->mem);
-    big.SetTMP();
+    big.SetTMPFlag();

     /* call _Merge function */
     _Merge(&smalls, &big, whereToMerge);

@@ -371,7 +371,7 @@ XTensor Merge(const XTensor &smallA, const XTensor &smallB, int whereToMerge)
     float dr = (!smallA.isSparse) ? 1.0F : smallA.denseRatio;
     XTensor big(order, dimSize, smallA.dataType, dr, smallA.devID, smallA.mem);
-    big.SetTMP();
+    big.SetTMPFlag();

     XList smalls(2);
     smalls.Add(&smallA);

source/tensor/core/shape/Split.cpp

@@ -184,7 +184,7 @@ XTensor Split(const XTensor &s, int whereToSplit, int splitNum)
     float dr = (!s.isSparse) ? 1.0F : s.denseRatio;
     XTensor t(order, dimSize, s.dataType, dr, s.devID, s.mem);
-    t.SetTMP();
+    t.SetTMPFlag();

     /* call _Split function */
     _Split(&s, &t, whereToSplit, splitNum);

source/tensor/core/shape/Transpose.cpp

@@ -138,7 +138,7 @@ XTensor Transpose(const XTensor &a, const int i, const int j)
     float dr = (!a.isSparse) ? 1.0F : a.denseRatio;
     XTensor b(order, dimSize, a.dataType, dr, a.devID, a.mem);
-    b.SetTMP();
+    b.SetTMPFlag();

     /* call _Transpose function */
     _Transpose(&a, &b, i, j);

source/tensor/core/shape/Unsqueeze.cpp

@@ -122,7 +122,7 @@ XTensor Unsqueeze(const XTensor &a, int dim, int dSize)
     float dr = (!a.isSparse) ? 1.0F : a.denseRatio;
     XTensor b(order, dimSize, a.dataType, dr, a.devID, a.mem);
-    b.SetTMP();
+    b.SetTMPFlag();

     /* call _Unsqueeze function */
     _Unsqueeze(&a, &b, dim, dSize);

source/tensor/function/HardTanH.cpp

@@ -72,7 +72,7 @@ y = 1 if x > 1
 XTensor HardTanH(const XTensor &x)
 {
     XTensor y(&x);
-    y.SetTMP();
+    y.SetTMPFlag();

     /* call _HardTanH function */
     _HardTanH(&x, &y);

source/tensor/function/Identity.cpp

@@ -46,7 +46,7 @@ make a new tensor to keep the result and return it
 XTensor Identity(const XTensor &x)
 {
     XTensor y(&x);
-    y.SetTMP();
+    y.SetTMPFlag();

     /* call _Identity function */
     _Identity(&x, &y);

source/tensor/function/LogSoftmax.cpp

@@ -181,7 +181,7 @@ XTensor LogSoftmax(const XTensor &x, int leadDim)
         ld = x.order - 1;

     XTensor y(&x);
-    y.SetTMP();
+    y.SetTMPFlag();

     /* call _LogSoftmax function */
     _LogSoftmax(&x, &y, ld);

source/tensor/function/Rectify.cpp

@@ -65,7 +65,7 @@ make a new tensor to keep the result and return it
 XTensor Rectify(const XTensor &x)
 {
     XTensor y(&x);
-    y.SetTMP();
+    y.SetTMPFlag();

     /* call _Rectify function */
     _Rectify(&x, &y);

source/tensor/function/Sigmoid.cpp

@@ -63,7 +63,7 @@ make a new tensor to keep the result and return it
 XTensor Sigmoid(const XTensor &x)
 {
     XTensor y(&x);
-    y.SetTMP();
+    y.SetTMPFlag();

     /* call _Sigmoid function */
     _Sigmoid(&x, &y);

source/tensor/function/Softmax.cpp

@@ -136,7 +136,7 @@ XTensor Softmax(const XTensor &x, int leadDim)
         ld = x.order - 1;

     XTensor y(&x);
-    y.SetTMP();
+    y.SetTMPFlag();

     /* call _Softmax function */
     _Softmax(&x, &y, ld);
