NiuTrans.Tensor — Commit b3ecba16, authored Oct 23, 2019 by liyinqiao
Update IsSameShaped function.
parent ddeebb47
Showing 48 changed files with 126 additions and 95 deletions (+126 -95).
source/network/XBackwardMath.cpp  +2 -2
source/network/XBackwardShape.cpp  +1 -1
source/network/XNoder.cpp  +1 -1
source/sample/transformer/T2TSearch.cpp  +1 -1
source/tensor/XTensor.cpp  +1 -1
source/tensor/core/CHeader.h  +1 -0
source/tensor/core/arithmetic/Div.cpp  +2 -2
source/tensor/core/arithmetic/DivDim.cpp  +2 -2
source/tensor/core/arithmetic/Mask.cpp  +1 -1
source/tensor/core/arithmetic/MatrixMulBatched.cpp  +3 -3
source/tensor/core/arithmetic/MulAndShift.cpp  +1 -1
source/tensor/core/arithmetic/Multiply.cpp  +2 -2
source/tensor/core/arithmetic/MultiplyDim.cpp  +3 -3
source/tensor/core/arithmetic/Sub.cpp  +2 -2
source/tensor/core/arithmetic/SubDim.cpp  +2 -2
source/tensor/core/arithmetic/Sum.cpp  +2 -2
source/tensor/core/arithmetic/SumDim.cpp  +3 -3
source/tensor/core/arithmetic/XTensorBLAS.cu  +3 -3
source/tensor/core/math/Binary.cpp  +3 -3
source/tensor/core/math/Binary.cu  +1 -1
source/tensor/core/math/Clip.cpp  +2 -2
source/tensor/core/math/Clip.cu  +1 -1
source/tensor/core/math/Compare.cpp  +3 -3
source/tensor/core/math/Normalize.cpp  +4 -4
source/tensor/core/math/ScaleAndShift.cpp  +1 -1
source/tensor/core/math/Unary.cpp  +3 -3
source/tensor/core/math/Unary.cu  +1 -1
source/tensor/core/movement/CopyInGrid.cpp  +1 -1
source/tensor/core/reduce/ReduceSum.cpp  +1 -1
source/tensor/core/shape/Concatenate.cpp  +4 -4
source/tensor/core/shape/IsSameShaped.cpp  +26 -2
source/tensor/core/shape/IsSameShaped.h  +8 -2
source/tensor/core/shape/Merge.cpp  +1 -1
source/tensor/core/shape/Reshape.cpp  +1 -1
source/tensor/core/shape/Squeeze.cpp  +2 -2
source/tensor/core/sort/Sort.cpp  +1 -1
source/tensor/function/HardTanH.cpp  +2 -2
source/tensor/function/Identity.cpp  +2 -2
source/tensor/function/LogSoftmax.cpp  +3 -3
source/tensor/function/LogSoftmax.cu  +1 -1
source/tensor/function/Loss.cpp  +3 -3
source/tensor/function/Loss.cu  +2 -2
source/tensor/function/Rectify.cpp  +2 -2
source/tensor/function/Sigmoid.cpp  +2 -2
source/tensor/function/Softmax.cpp  +3 -3
source/tensor/function/Softmax.cu  +1 -1
source/tensor/loss/CrossEntropy.cpp  +7 -7
source/tensor/loss/CrossEntropy.cu  +1 -1
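The change is mechanical across almost all of these files: the pointer-based shape check keeps its behaviour but gains a leading underscore (_IsSameShaped), while new IsSameShaped overloads take references, so call sites no longer need to take the address of a tensor. A minimal sketch of the two call styles after this commit; only the IsSameShaped/_IsSameShaped signatures come from the diffs below, while the helper function and include paths are illustrative:

#include <cstdio>
#include "XTensor.h"                      /* illustrative include paths */
#include "core/shape/IsSameShaped.h"

using namespace nts;

/* hypothetical helper, not part of the repository */
void CheckPair(const XTensor &a, const XTensor &b)
{
    bool sameByRef = IsSameShaped(a, b);      /* new reference overload: no address-of needed */
    bool sameByPtr = _IsSameShaped(&a, &b);   /* pointer version, renamed with a leading underscore */

    /* the reference overload forwards to the pointer one, so the two must agree */
    if (sameByRef != sameByPtr)
        printf("unexpected: overloads disagree\n");
}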
source/network/XBackwardMath.cpp

@@ -381,7 +381,7 @@ void XMathGrad::GradDiv(XTensor * node, bool isEfficient)
     XNoder::MakeGrad(a);
     XNoder::MakeGrad(b);
-    CheckNTErrors(IsSameShaped(a, b), "Wrong sized input tensors!");
+    CheckNTErrors(_IsSameShaped(a, b), "Wrong sized input tensors!");
     _Div(node->grad, b, a->grad, 1.0F);

@@ -726,7 +726,7 @@ void XMathGrad::GradMultiply(XTensor * node, bool isEfficient)
     XTensor * a = income.tails[0];
     XTensor * b = income.tails[1];
-    CheckNTErrors(IsSameShaped(a, b), "Wrong sized input tensors!");
+    CheckNTErrors(_IsSameShaped(a, b), "Wrong sized input tensors!");
     if (!isEfficient || a->isGrad) {
         XNoder::MakeGrad(a);
source/network/XBackwardShape.cpp

@@ -281,7 +281,7 @@ void XShapeGrad::GradMergeList(XTensor * node, bool isEfficient)
         smallsGrad.Add(tail->grad);
         if (i > 1){
-            CheckNTErrors(IsSameShaped(last, tail),
+            CheckNTErrors(_IsSameShaped(last, tail),
                           "Input tensors must be of the same size!");
         }
source/network/XNoder.cpp

@@ -29,7 +29,7 @@ void XNoder::MakeGrad(XTensor * node)
     if (node == NULL)
         return;
-    if (!IsSameShaped(node, node->grad)){
+    if (!_IsSameShaped(node, node->grad)){
         delete node->grad;
         node->grad = NewTensor(node);
         node->grad->SetZeroAll();
source/sample/transformer/T2TSearch.cpp

@@ -319,7 +319,7 @@ void T2TSearch::Generate(T2TStateBundle * beam)
     for (int i = 0; i < indexGPU.unitNum; i++)
         indexGPU.SetInt(i * stride + indexGPU.GetInt(i), i);
-    CheckNTErrors(IsSameShaped(&prob, &probPath), "Wrong tensor shape!");
+    CheckNTErrors(IsSameShaped(prob, probPath), "Wrong tensor shape!");
     /* sequence probability of top-k candidates */
     XTensor probPathTopK;
source/tensor/XTensor.cpp

@@ -360,7 +360,7 @@ XTensor& XTensor::operator= (const XTensor& tensor)
     XLink::ClearOutgoing(this);
     XLink::ClearIncoming(this);
-    if (!IsSameShaped(this, &tensor))
+    if (!_IsSameShaped(this, &tensor))
         Resize(tensor.order, tensor.dimSize, tensor.dataType, tensor.denseRatio);
     _Identity(&tensor, this);
source/tensor/core/CHeader.h

@@ -93,5 +93,6 @@
 #include "utilities/XMatrixSegment.h"
 #include "utilities/FlushToMem.h"
 #include "utilities/CheckData.h"
+#include "utilities/SetAscendingOrder.h"
 #endif // __CHEADER_H__
source/tensor/core/arithmetic/Div.cpp

@@ -169,7 +169,7 @@ int GetDivDimIndex(const XTensor &a, const XTensor &b)
 {
     if (a.order < b.order)
         return -1;
-    if (IsSameShaped(&a, &b))
+    if (IsSameShaped(a, b))
         return -1;
     int hitCount = 0;

@@ -254,7 +254,7 @@ where i is the index of the item
 */
 void Div(const XTensor &a, const XTensor &b, XTensor &c, DTYPE alpha, int leadingDim)
 {
-    if (!c.isInit || !IsSameShaped(&a, &c)) {
+    if (!c.isInit || !IsSameShaped(a, c)) {
         InitTensor(&c, &a);
     }
source/tensor/core/arithmetic/DivDim.cpp

@@ -57,7 +57,7 @@ void _DivDim(const XTensor * a, const XTensor * b, XTensor * c, int n, DTYPE alp
     CheckDev(a->devID, b->devID);
-    if (IsSameShaped(a, b)){
+    if (_IsSameShaped(a, b)){
         _Div(a, b, c, alpha);
         return;
     }

@@ -189,7 +189,7 @@ i.e., a is divided with b by broadcasting
 */
 void DivDim(const XTensor &a, const XTensor &b, XTensor &c, int n, DTYPE alpha)
 {
-    if (!c.isInit || !IsSameShaped(&a, &c)) {
+    if (!c.isInit || !IsSameShaped(a, c)) {
         InitTensor(&c, &a);
     }
source/tensor/core/arithmetic/Mask.cpp

@@ -172,7 +172,7 @@ where i is the index of the element
 */
 void Mask(const XTensor &a, const XTensor &mask, XTensor &c, DTYPE alpha)
 {
-    if (!c.isInit || !IsSameShaped(&a, &c)) {
+    if (!c.isInit || !IsSameShaped(a, c)) {
         InitTensor(&c, &a);
     }
source/tensor/core/arithmetic/MatrixMulBatched.cpp

@@ -243,9 +243,9 @@ void _MatrixMulBatchedCPU(const TensorList * a, MATRIX_TRANS_TYPE transposedA,
         XTensor * ai = (XTensor*)a->GetItem(i);
         XTensor * bi = (XTensor*)b->GetItem(i);
         XTensor * ci = (XTensor*)c->GetItem(i);
-        if (!IsSameShaped(aim, ai) ||
-            !IsSameShaped(bim, bi) ||
-            !IsSameShaped(cim, ci))
+        if (!_IsSameShaped(aim, ai) ||
+            !_IsSameShaped(bim, bi) ||
+            !_IsSameShaped(cim, ci))
         {
             isUniform = false;
             break;
source/tensor/core/arithmetic/MulAndShift.cpp

@@ -37,7 +37,7 @@ int GetSumIndex(const XTensor &a, const XTensor &b)
 {
     if (a.order < b.order)
         return -1;
-    if (IsSameShaped(&a, &b))
+    if (IsSameShaped(a, b))
         return -1;
     int hitCount = 0;
source/tensor/core/arithmetic/Multiply.cpp

@@ -170,7 +170,7 @@ int GetMultiplyDimIndex(const XTensor &a, const XTensor &b)
 {
     if (a.order < b.order)
         return -1;
-    if (IsSameShaped(&a, &b))
+    if (IsSameShaped(a, b))
         return -1;
     int hitCount = 0;

@@ -255,7 +255,7 @@ where i is the index of the item
 */
 void Multiply(const XTensor &a, const XTensor &b, XTensor &c, DTYPE alpha, int leadingDim)
 {
-    if (!c.isInit || !IsSameShaped(&a, &c)) {
+    if (!c.isInit || !IsSameShaped(a, c)) {
         InitTensor(&c, &a);
     }
source/tensor/core/arithmetic/MultiplyDim.cpp

@@ -58,7 +58,7 @@ void _MultiplyDim(const XTensor * a, const XTensor * b, XTensor * c, int n, DTYP
     CheckDev(a->devID, b->devID);
-    if (IsSameShaped(a, b)){
+    if (_IsSameShaped(a, b)){
         _Multiply(a, b, c, alpha);
         return;
     }

@@ -204,7 +204,7 @@ i.e., a is multiplied with b by broadcasting
 */
 void MultiplyDim(const XTensor &a, const XTensor &b, XTensor &c, int n)
 {
-    if (!c.isInit || !IsSameShaped(&a, &c)) {
+    if (!c.isInit || !IsSameShaped(a, c)) {
         InitTensor(&c, &a);
     }

@@ -372,7 +372,7 @@ where some of dimensions of b can be of size 1
 */
 void MultiplyBroadcast(const XTensor &a, const XTensor &b, XTensor &c)
 {
-    if (!c.isInit || !IsSameShaped(&a, &c)) {
+    if (!c.isInit || !IsSameShaped(a, c)) {
         InitTensor(&c, &a);
     }
source/tensor/core/arithmetic/Sub.cpp

@@ -150,7 +150,7 @@ int GetSubDimIndex(const XTensor &a, const XTensor &b)
 {
     if (a.order < b.order)
         return -1;
-    if (IsSameShaped(&a, &b))
+    if (IsSameShaped(a, b))
         return -1;
     int hitCount = 0;

@@ -224,7 +224,7 @@ tensor subtraction c = a - b * \beta
 */
 void Sub(const XTensor &a, const XTensor &b, XTensor &c, DTYPE beta)
 {
-    if (!c.isInit || !IsSameShaped(&a, &c)) {
+    if (!c.isInit || !IsSameShaped(a, c)) {
         InitTensor(&c, &a);
     }
source/tensor/core/arithmetic/SubDim.cpp

@@ -62,7 +62,7 @@ void _SubDim(const XTensor * a, const XTensor * b, XTensor * c, int n, DTYPE bet
         return;
     }
-    if (IsSameShaped(a, b)) {
+    if (_IsSameShaped(a, b)) {
         _Sub(a, b, c, beta);
         return;
     }

@@ -189,7 +189,7 @@ i.e., a is subtracted with b by broadcasting
 */
 void SubDim(const XTensor &a, const XTensor &b, XTensor &c, int n, DTYPE beta)
 {
-    if (!c.isInit || !IsSameShaped(&a, &c)) {
+    if (!c.isInit || !IsSameShaped(a, c)) {
         InitTensor(&c, &a);
     }
source/tensor/core/arithmetic/Sum.cpp

@@ -184,7 +184,7 @@ int GetSumDimIndex(const XTensor &a, const XTensor &b)
 {
     if (a.order < b.order)
         return -1;
-    if (IsSameShaped(&a, &b))
+    if (IsSameShaped(a, b))
         return -1;
     int hitCount = 0;

@@ -257,7 +257,7 @@ tensor summation c = a + b * \beta
 */
 void Sum(const XTensor &a, const XTensor &b, XTensor &c, DTYPE beta)
 {
-    if (!c.isInit || !IsSameShaped(&a, &c)) {
+    if (!c.isInit || !IsSameShaped(a, c)) {
         InitTensor(&c, &a);
     }
source/tensor/core/arithmetic/SumDim.cpp

@@ -65,7 +65,7 @@ void _SumDim(const XTensor * a, const XTensor * b, XTensor * c, int n, DTYPE bet
         return;
     }
-    if (IsSameShaped(a, b)){
+    if (_IsSameShaped(a, b)){
         _Sum(a, b, c, beta);
         return;
     }

@@ -206,7 +206,7 @@ i.e., a is summed with b by broadcasting
 */
 void SumDim(const XTensor &a, const XTensor &b, XTensor &c, int n, DTYPE beta)
 {
-    if (!c.isInit || !IsSameShaped(&a, &c)) {
+    if (!c.isInit || !IsSameShaped(a, c)) {
         InitTensor(&c, &a);
     }

@@ -375,7 +375,7 @@ c = a + b * \beta
 */
 void SumBroadcast(const XTensor &a, const XTensor &b, XTensor &c, DTYPE beta)
 {
-    if (!c.isInit || !IsSameShaped(&a, &c)) {
+    if (!c.isInit || !IsSameShaped(a, c)) {
         InitTensor(&c, &a);
     }
source/tensor/core/arithmetic/XTensorBLAS.cu

@@ -225,9 +225,9 @@ void _CudaBLASMatrixMULList(cublasHandle_t * handle,
         XTensor * ai = (XTensor*)a->GetItem(i);
         XTensor * bi = (XTensor*)b->GetItem(i);
         XTensor * ci = (XTensor*)c->GetItem(i);
-        if (!IsSameShaped(aim, ai) ||
-            !IsSameShaped(bim, bi) ||
-            !IsSameShaped(cim, ci))
+        if (!_IsSameShaped(aim, ai) ||
+            !_IsSameShaped(bim, bi) ||
+            !_IsSameShaped(cim, ci))
         {
             isUniform = false;
             break;
source/tensor/core/math/Binary.cpp

@@ -78,7 +78,7 @@ void _funcName(const XTensor * a, XTensor * b, T num)
         _cudaFuncName(a, b, num);                                \
         return;                                                  \
     }                                                            \
-    CheckNTErrors((IsSameShaped(a, b)),                          \
+    CheckNTErrors((_IsSameShaped(a, b)),                         \
                   "Input tensors should have the same data type!"); \
     if (a->dataType == X_INT) {                                  \
         int * d = (int*)a->data;                                 \

@@ -113,7 +113,7 @@ void _funcName(const XTensor * a, XTensor * b, T num)
     if (a->devID >= 0) {                                         \
         ShowNTErrors("No GPU devices support!")                  \
     }                                                            \
-    CheckNTErrors((IsSameShaped(a, b)),                          \
+    CheckNTErrors((_IsSameShaped(a, b)),                         \
                   "Input tensors should have the same data type!"); \
     if (a->dataType == X_INT) {                                  \
         int * d = (int*)a->data;                                 \

@@ -182,7 +182,7 @@ template XTensor funcName<double>(const XTensor&, double);
 template<class T>                                                \
 void funcName(const XTensor &a, XTensor &b, T num)               \
 {                                                                \
-    if (!b.isInit || !IsSameShaped(&a, &b)) {                    \
+    if (!b.isInit || !IsSameShaped(a, b)) {                      \
         InitTensor(&b, &a);                                      \
     }                                                            \
     _funcName(&a, &b, num);                                      \
source/tensor/core/math/Binary.cu

@@ -90,7 +90,7 @@ void Kernel##funcName(T1 * a, T1 * b, int size, T2 num)
 template<class T>                                                \
 void _Cuda##funcName(const XTensor * a, XTensor * b, T num)      \
 {                                                                \
-    CheckNTErrors((IsSameShaped(a, b)),                          \
+    CheckNTErrors((_IsSameShaped(a, b)),                         \
                   "Input tensors should have the same type!");   \
     CheckNTErrors((a->isSparse == false), "TODO!");              \
                                                                  \
source/tensor/core/math/Clip.cpp

@@ -44,7 +44,7 @@ void _Clip(const XTensor * a, XTensor * b, DTYPE lower, DTYPE upper)
     }
 #endif
-    CheckNTErrors((IsSameShaped(a, b)), "Input tensors should have the same type!");
+    CheckNTErrors((_IsSameShaped(a, b)), "Input tensors should have the same type!");
     CheckNTErrors((a->dataType == DEFAULT_DTYPE), "TODO!");
     DTYPE * d = (DTYPE*)a->data;

@@ -111,7 +111,7 @@ XTensor Clip(const XTensor & a, DTYPE lower, DTYPE upper)
 void Clip(const XTensor & a, XTensor & b, DTYPE lower, DTYPE upper)
 {
-    if (!b.isInit || !IsSameShaped(&a, &b)) {
+    if (!b.isInit || !IsSameShaped(a, b)) {
         InitTensor(&b, &a);
     }
source/tensor/core/math/Clip.cu

@@ -75,7 +75,7 @@ set each entry to its clip value
 */
 void _CudaClip(const XTensor * a, XTensor * b, DTYPE lower, DTYPE upper)
 {
-    CheckNTErrors((IsSameShaped(a, b)), "Input tensors should have the same type!");
+    CheckNTErrors((_IsSameShaped(a, b)), "Input tensors should have the same type!");
     CheckNTErrors((a->isSparse == false), "TODO!");
     int gridSize[3];
source/tensor/core/math/Compare.cpp

@@ -42,7 +42,7 @@ DTYPE myIsNotEqual(DTYPE a, DTYPE b)
 #define _SIMPLE_COMPARE_FUNCTION(_funcName, _cudaFuncName, origFunc) \
 void _funcName(const XTensor * a, XTensor * b, DTYPE number)     \
 {                                                                \
-    CheckNTErrors((IsSameShaped(a, b)),                          \
+    CheckNTErrors((_IsSameShaped(a, b)),                         \
                   "Input tensors should have the same type!");   \
     CheckNTErrors((a->dataType == DEFAULT_DTYPE), "TODO!");      \
     /* run it on GPUs */                                         \

@@ -59,7 +59,7 @@ void _funcName(const XTensor * a, XTensor * b, DTYPE number)
 #define _SIMPLE_COMPARE_FUNCTION(_funcName, origFunc)            \
 void _funcName(const XTensor * a, XTensor * b, DTYPE number)     \
 {                                                                \
-    CheckNTErrors((IsSameShaped(a, b)),                          \
+    CheckNTErrors((_IsSameShaped(a, b)),                         \
                   "Input tensors should have the same type!");   \
     CheckNTErrors((a->dataType == DEFAULT_DTYPE), "TODO!");      \
     /* run it on GPUs */                                         \

@@ -97,7 +97,7 @@ XTensor funcName(const XTensor &a, DTYPE number)
 #define SIMPLE_COMPARE_FUNCTION_VOID(funcName, _funcName, operationId) \
 void funcName(const XTensor &a, XTensor &b, DTYPE number)        \
 {                                                                \
-    if (!b.isInit || !IsSameShaped(&a, &b)) {                    \
+    if (!b.isInit || !IsSameShaped(a, b)) {                      \
         InitTensor(&b, &a);                                      \
     }                                                            \
     _funcName(&a, &b, number);                                   \
source/tensor/core/math/Normalize.cpp

@@ -48,9 +48,9 @@ void _Normalize(const XTensor * input, XTensor * output, int dim,
                 const XTensor * a, const XTensor * b, DTYPE epsilon)
 {
     int dimRDI = input->order - dim - 1;
-    CheckNTErrors((IsSameShaped(input, output)), "Unmatched input tensors!");
-    CheckNTErrors((IsSameShaped(a, b)), "Unmatched input tensors");
-    CheckNTErrors((IsSameShaped(mean, var)), "Unmatched input tensors");
+    CheckNTErrors((_IsSameShaped(input, output)), "Unmatched input tensors!");
+    CheckNTErrors((_IsSameShaped(a, b)), "Unmatched input tensors");
+    CheckNTErrors((_IsSameShaped(mean, var)), "Unmatched input tensors");
     CheckNTErrors((input && output && mean && var && a && b), "Empty input tensors!");
     CheckNTErrors((dimRDI >= 0 && dimRDI < input->order), "Incorrect reduction dimension!");
     CheckNTErrors((input->order == mean->order + 1), "Incorrect reduction dimension!");

@@ -204,7 +204,7 @@ void Normalize(const XTensor &input, XTensor &output, int dim,
                const XTensor &mean, const XTensor &var,
                const XTensor &a, const XTensor &b, DTYPE epsilon)
 {
-    if (!output.isInit || !IsSameShaped(&input, &output)) {
+    if (!output.isInit || !IsSameShaped(input, output)) {
         InitTensor(&output, &input);
     }
source/tensor/core/math/ScaleAndShift.cpp

@@ -148,7 +148,7 @@ b = a * scale + shift
 */
 void ScaleAndShift(const XTensor &a, XTensor &b, DTYPE scale, DTYPE shift)
 {
-    if (!b.isInit || !IsSameShaped(&a, &b)) {
+    if (!b.isInit || !IsSameShaped(a, b)) {
         InitTensor(&b, &a);
     }
source/tensor/core/math/Unary.cpp

@@ -78,7 +78,7 @@ void _funcName(const XTensor * a, XTensor * b)
         _cudaFuncName(a, b);                                     \
         return;                                                  \
     }                                                            \
-    CheckNTErrors((IsSameShaped(a, b)),                          \
+    CheckNTErrors((_IsSameShaped(a, b)),                         \
                   "Input tensors should have the same type!");   \
     if (a->dataType == X_INT) {                                  \
         int * d = (int*)a->data;                                 \

@@ -109,7 +109,7 @@ void _funcName(const XTensor * a, XTensor * b)
     if (a->devID >= 0) {                                         \
         ShowNTErrors("No GPU devices support!")                  \
     }                                                            \
-    CheckNTErrors((IsSameShaped(a, b)),                          \
+    CheckNTErrors((_IsSameShaped(a, b)),                         \
                   "Input tensors should have the same type!");   \
     if (a->dataType == X_INT) {                                  \
         int * d = (int*)a->data;                                 \

@@ -161,7 +161,7 @@ XTensor funcName(const XTensor & a)
 #define SIMPLE_UNARY_FUNCTION_VOID(funcName, _funcName, operationId) \
 void funcName(const XTensor & a, XTensor & b)                    \
 {                                                                \
-    if (!b.isInit || !IsSameShaped(&a, &b)) {                    \
+    if (!b.isInit || !IsSameShaped(a, b)) {                      \
         InitTensor(&b, &a);                                      \
     }                                                            \
     _funcName(&a, &b);                                           \
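For the macro-generated wrappers in Unary.cpp (and the analogous macros in Binary.cpp and Compare.cpp above), the edit lands inside the generated function body. As an illustration only, expanding SIMPLE_UNARY_FUNCTION_VOID for a hypothetical Absolute/_Absolute pair would now produce roughly the following; the function names and the trailing steps are assumptions, only the shape-check line reflects this commit:

/* hypothetical expansion of SIMPLE_UNARY_FUNCTION_VOID(Absolute, _Absolute, <operation id>) */
void Absolute(const XTensor & a, XTensor & b)
{
    /* reference overload of IsSameShaped: no more &a, &b at this call site */
    if (!b.isInit || !IsSameShaped(a, b)) {
        InitTensor(&b, &a);
    }
    _Absolute(&a, &b);
    /* ... the macro continues (e.g. linking the operation into the network graph) ... */
}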
source/tensor/core/math/Unary.cu

@@ -155,7 +155,7 @@ void Kernel##funcName(T * a, T * b, int size) \
 }                                                                \
 void _Cuda##funcName(const XTensor * a, XTensor * b)             \
 {                                                                \
-    CheckNTErrors((IsSameShaped(a, b)),                          \
+    CheckNTErrors((_IsSameShaped(a, b)),                         \
                   "Input tensors should have the same type!");   \
     CheckNTErrors(a->isSparse == false, "TODO!");                \
                                                                  \
source/tensor/core/movement/CopyInGrid.cpp

@@ -39,7 +39,7 @@ in the k-th grid
 */
 void _CopyInGrid(const XTensor * s, XTensor * t, int * index, int blockDim, int blockNumInGrid, bool isIndexOnDev)
 {
-    CheckNTErrors((IsSameShaped(s, t)), "Unmatched tensors!");
+    CheckNTErrors((_IsSameShaped(s, t)), "Unmatched tensors!");
     int blockDimRDI = s->order - blockDim - 1;
     int blockSize = 1;
source/tensor/core/reduce/ReduceSum.cpp

@@ -52,7 +52,7 @@ void _ReduceSum(const XTensor * input, XTensor * output, int dim, const XTensor
     CheckNTErrors((input->order == output->order + 1), "Incorrect tensor sizes!");
     CheckNTErrors((input->order > dim && dim >= 0), "Illegal dimension to reduce!");
     CheckNTErrors((input->dataType == output->dataType), "Unmatched data types!");
-    CheckNTErrors((shift == NULL || IsSameShaped(output, shift)), "Incorrect shift tensor size!");
+    CheckNTErrors((shift == NULL || _IsSameShaped(output, shift)), "Incorrect shift tensor size!");
     int dimRDI = input->order - dim - 1;
     CheckNTErrors(dimRDI >= 0, "Wrong dimension!");
source/tensor/core/shape/Concatenate.cpp

@@ -45,7 +45,7 @@ void _Concatenate(const TensorList * smalls, XTensor * big, int dim)
         XTensor * a = (XTensor*)smalls->GetItem(i - 1);
         XTensor * b = (XTensor*)smalls->GetItem(i);
         CheckNTErrors((a && b), "Empty input tensors!");
-        if (!IsSameShaped(a, b))
+        if (!_IsSameShaped(a, b))
             uniform = false;
     }

@@ -77,7 +77,7 @@ XTensor Concatenate(const TensorList &smalls, int dim)
         XTensor * a = (XTensor*)smalls.GetItem(i - 1);
         XTensor * b = (XTensor*)smalls.GetItem(i);
         CheckNTErrors((a && b), "Empty input tensors!");
-        if (!IsSameShaped(a, b))
+        if (!_IsSameShaped(a, b))
             uniform = false;
     }
     XTensor * tensor = (XTensor*)smalls.GetItem(0);

@@ -190,7 +190,7 @@ void Concatenate(const TensorList & smalls, XTensor & big, int dim)
         XTensor * a = (XTensor*)smalls.GetItem(i - 1);
         XTensor * b = (XTensor*)smalls.GetItem(i);
         CheckNTErrors((a && b), "Empty input tensors!");
-        if (!IsSameShaped(a, b))
+        if (!_IsSameShaped(a, b))
             uniform = false;
     }

@@ -291,7 +291,7 @@ XTensor Concatenate(const XTensor &smallA, const XTensor &smallB, int dim)
         XTensor * a = (XTensor*)smalls.Get(i - 1);
         XTensor * b = (XTensor*)smalls.Get(i);
         CheckNTErrors((a && b), "Empty input tensors!");
-        if (!IsSameShaped(a, b))
+        if (!_IsSameShaped(a, b))
             uniform = false;
     }
     XTensor * tensor = (XTensor*)smalls.Get(0);
source/tensor/core/shape/IsSameShaped.cpp

@@ -30,7 +30,7 @@ check whether the two matrices are in the same type and size
 >> b - anther tensor to compare with
 << return - whether the two input tensors are identical
 */
-bool IsSameShaped(const XTensor * a, const XTensor * b)
+bool _IsSameShaped(const XTensor * a, const XTensor * b)
 {
     if (a == NULL || b == NULL)
         return false;

@@ -56,15 +56,38 @@ bool IsSameShaped(const XTensor * a, const XTensor * b)
 }
 
 /*
+check whether the two matrices are in the same type and size
+>> a - input tensor
+>> b - anther tensor to compare with
+<< return - whether the two input tensors are identical
+*/
+bool IsSameShaped(const XTensor & a, const XTensor & b)
+{
+    return _IsSameShaped(&a, &b);
+}
+
+/*
 check whether the three matrices are in the same type and size
 >> a - input tensor
 >> b - anther tensor to compare with
 >> c - a tensor again
 << return - whether the two input tensors are identical
 */
-bool IsSameShaped(const XTensor * a, const XTensor * b, const XTensor * c)
+bool _IsSameShaped(const XTensor * a, const XTensor * b, const XTensor * c)
 {
     return IsSameShaped(a, b) && IsSameShaped(a, c);
 }
 
+/*
+check whether the three matrices are in the same type and size
+>> a - input tensor
+>> b - anther tensor to compare with
+>> c - a tensor again
+<< return - whether the two input tensors are identical
+*/
+bool IsSameShaped(const XTensor & a, const XTensor & b, const XTensor & c)
+{
+    return _IsSameShaped(&a, &b, &c);
+}
+
 } // namespace nts(NiuTrans.Tensor)
\ No newline at end of file
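The new reference overloads in IsSameShaped.cpp are thin wrappers: they take the addresses of their arguments and defer to the renamed pointer versions, so both forms always return the same result. A hedged usage sketch of the three-tensor overload added here; the helper function, printf messages, and include path are illustrative, while the overload signatures come from this file:

#include <cstdio>
#include "core/shape/IsSameShaped.h"   /* illustrative include path */

using namespace nts;

void ReportShapes(const XTensor & a, const XTensor & b, const XTensor & c)
{
    if (IsSameShaped(a, b, c))          /* new three-tensor reference overload */
        printf("a, b and c share data type and shape\n");
    else if (_IsSameShaped(&a, &b))     /* renamed pointer version, two tensors */
        printf("only a and b share data type and shape\n");
}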
source/tensor/core/shape/IsSameShaped.h

@@ -27,10 +27,16 @@
 namespace nts { // namespace nts(NiuTrans.Tensor)
 
 /* judge whether the two matrices are in the same type and size */
-bool IsSameShaped(const XTensor * a, const XTensor * b);
+bool _IsSameShaped(const XTensor * a, const XTensor * b);
+
+/* judge whether the two matrices are in the same type and size */
+bool IsSameShaped(const XTensor & a, const XTensor & b);
+
+/* judge whether the three matrices are in the same type and size */
+bool _IsSameShaped(const XTensor * a, const XTensor * b, const XTensor * c);
 
 /* judge whether the three matrices are in the same type and size */
-bool IsSameShaped(const XTensor * a, const XTensor * b, const XTensor * c);
+bool IsSameShaped(const XTensor & a, const XTensor & b, const XTensor & c);
 
 } // namespace nts(NiuTrans.Tensor)
source/tensor/core/shape/Merge.cpp

@@ -434,7 +434,7 @@ merge two tensors into a big tensor (return an XTensor structure)
 */
 XTensor Merge(const XTensor &smallA, const XTensor &smallB, int whereToMerge)
 {
-    CheckNTErrors(IsSameShaped(&smallA, &smallB),
+    CheckNTErrors(IsSameShaped(smallA, smallB),
                   "The two tensors must be of the same size!");
     int order = smallA.order;
source/tensor/core/shape/Reshape.cpp

@@ -53,7 +53,7 @@ XTensor Reshape(XTensor &s, int order, int * dimSize)
 void Reshape(XTensor &s, XTensor &t, int order, int * dimSize)
 {
-    if (!t.isInit || !IsSameShaped(&t, &s)) {
+    if (!t.isInit || !IsSameShaped(t, s)) {
         InitTensor(&t, &s);
     }
source/tensor/core/shape/Squeeze.cpp

@@ -38,7 +38,7 @@ void _Squeeze(XTensor * source, XTensor * target, int leadingDim)
 {
     int order = target->order;
-    CheckNTErrors(IsSameShaped(source, target),
+    CheckNTErrors(_IsSameShaped(source, target),
                   "The source and target tensor must be of the same size!");
     CheckNTErrors(leadingDim >= -1 && leadingDim < order,
                   "Wrong leading dimension");

@@ -131,7 +131,7 @@ XTensor Squeeze(XTensor & source, int leadingDim)
 void Squeeze(XTensor & source, XTensor & target, int leadingDim)
 {
-    if (!target.isInit || !IsSameShaped(&source, &target)) {
+    if (!target.isInit || !IsSameShaped(source, target)) {
         InitTensor(&target, &source);
     }
source/tensor/core/sort/Sort.cpp

@@ -41,7 +41,7 @@ void _Sort(const XTensor * a, XTensor * b, XTensor * index, int dim)
 {
     dim = MODX(dim, a->order);
-    CheckNTErrors((IsSameShaped(a, b)), "Input tensors should have the same type!");
+    CheckNTErrors((_IsSameShaped(a, b)), "Input tensors should have the same type!");
     CheckNTErrors((dim >= 0 && dim < a->order), "Incorrect dimension specified!");
     CheckNTErrors((a->order == index->order), "Unmatched input tensors!");
     CheckNTErrors((index->dataType == X_INT), "Wrong data type!");
source/tensor/function/HardTanH.cpp

@@ -37,7 +37,7 @@ y = 1 if x > 1
 */
 void _HardTanH(const XTensor * x, XTensor * y)
 {
-    CheckNTErrors(IsSameShaped(x, y),
+    CheckNTErrors(_IsSameShaped(x, y),
                   "The input tensor and output tensor must have the same shape!")
 #ifdef USE_CUDA

@@ -88,7 +88,7 @@ XTensor HardTanH(const XTensor &x)
 void HardTanH(const XTensor &x, XTensor &y)
 {
-    if (!y.isInit || !IsSameShaped(&y, &x)) {
+    if (!y.isInit || !IsSameShaped(y, x)) {
         InitTensor(&y, &x);
     }
source/tensor/function/Identity.cpp

@@ -34,7 +34,7 @@ identity function y = x
 */
 void _Identity(const XTensor * x, XTensor * y)
 {
-    CheckNTErrors(IsSameShaped(x, y),
+    CheckNTErrors(_IsSameShaped(x, y),
                   "The input tensor and output tensor must have the same shape!")
     _CopyValues(x, y);
 }

@@ -64,7 +64,7 @@ XTensor Identity(const XTensor &x)
 void Identity(const XTensor &x, XTensor &y)
 {
-    if (!y.isInit || !IsSameShaped(&y, &x)) {
+    if (!y.isInit || !IsSameShaped(y, x)) {
         InitTensor(&y, &x);
     }
source/tensor/function/LogSoftmax.cpp

@@ -211,7 +211,7 @@ void LogSoftmax(const XTensor &x, XTensor &y, int leadDim)
     if (ld < 0)
         ld = x.order - 1;
-    if (!y.isInit || !IsSameShaped(&y, &x)) {
+    if (!y.isInit || !IsSameShaped(y, x)) {
         InitTensor(&y, &x);
     }

@@ -353,7 +353,7 @@ void _LogSoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
         }
     }
     else {
-        CheckNTErrors((IsSameShaped(gold, y)), "The tensors must be of the same size!");
+        CheckNTErrors((_IsSameShaped(gold, y)), "The tensors must be of the same size!");
         for (int k = 0; k < blockNum; k++) {
             gp = (DTYPE*)gold->data + k * blockSize;
             op = (DTYPE*)y->data + k * blockSize;

@@ -407,7 +407,7 @@ void _LogSoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
         }
     }
     else {
-        CheckNTErrors((IsSameShaped(gold, y)), "The tensors must be of the same size!");
+        CheckNTErrors((_IsSameShaped(gold, y)), "The tensors must be of the same size!");
         for (int k = 0; k < blockNum; k++) {
             gp = (DTYPE*)gold->data + k * blockSize;
             op = (DTYPE*)y->data + k * blockSize;
source/tensor/function/LogSoftmax.cu

@@ -431,7 +431,7 @@ void _CudaLogSoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
                              dedx->dimSize[0], dedx->dimSize[1], gold->unitNumNonZero, lossName);
     }
     else {
-        CheckNTErrors((IsSameShaped(gold, y)), "The tensors must be of the same size!");
+        CheckNTErrors((_IsSameShaped(gold, y)), "The tensors must be of the same size!");
         for (int k = 0; k < blockNum; k++) {
             GDevs.GetCudaThread(x->devID, blockSize, cudaGridSize, cudaBlockSize);
source/tensor/function/Loss.cpp

@@ -49,7 +49,7 @@ DTYPE _LossCompute(XTensor * gold, XTensor * output, LOSS_FUNCTION_NAME LFName,
     DTYPE error = 0.0F;
     if (output->devID < 0) {
         CheckNTErrors((gLen >= 0 && gLen <= output->unitNum), "Illegal input length!");
-        CheckNTErrors((IsSameShaped(gold, output)), "The input tensors must be of the same size!");
+        CheckNTErrors((_IsSameShaped(gold, output)), "The input tensors must be of the same size!");
         CheckNTErrors((gold->dimSizeRDI[0] == 1 && output->dimSizeRDI[0] == 1), "TODO!");
         CheckNTErrors((gold->order > leadDim && leadDim >= 0), "Illegal leading dimension!");
         CheckNTErrors((gold->dataType == DEFAULT_DTYPE && output->dataType == DEFAULT_DTYPE), "TODO!");

@@ -206,7 +206,7 @@ DTYPE _LossComputeForLogScale(XTensor * gold, XTensor * output,
                               int leadDim, int gBeg, int gLen, int oBeg)
 {
     CheckNTErrors(gLen >= 0 && gLen <= output->unitNum, "Illegal input length!");
-    CheckNTErrors(IsSameShaped(gold, output), "The input tensors must be of the same size!");
+    CheckNTErrors(_IsSameShaped(gold, output), "The input tensors must be of the same size!");
     CheckNTErrors(gold->dimSizeRDI[0] == 1 && output->dimSizeRDI[0] == 1, "TODO!");
     CheckNTErrors(gold->order > leadDim && leadDim >= 0, "Illegal leading dimension!");
     CheckNTErrors(gold->dataType == DEFAULT_DTYPE && output->dataType == DEFAULT_DTYPE, "TODO!");

@@ -402,7 +402,7 @@ void _LossBackward(XTensor * dedy, XTensor * t, XTensor * y,
     if (y->devID < 0) {
         CheckNTErrors(tLen <= y->unitNum, "Illegal input length!");
-        CheckNTErrors(IsSameShaped(t, y) && IsSameShaped(dedy, y),
+        CheckNTErrors(_IsSameShaped(t, y) && _IsSameShaped(dedy, y),
                       "The input tensors must be of the same size!");
         CheckNTErrors((dedy->devID == t->devID) && (dedy->devID == y->devID),
                       "Tensor must be on the same device!");
source/tensor/function/Loss.cu

@@ -55,7 +55,7 @@ DTYPE _CudaLossCompute(XTensor * gold, XTensor * y, LOSS_FUNCTION_NAME LFName,
                        bool isLogOutput, int leadDim, int gBeg, int gLen, int yBeg)
 {
     CheckNTErrors((gLen >= 0 && gLen <= y->unitNum), "Illegal input length!");
-    CheckNTErrors((IsSameShaped(gold, y)), "The input tensors must be of the same size!");
+    CheckNTErrors((_IsSameShaped(gold, y)), "The input tensors must be of the same size!");
     CheckNTErrors((gold->dimSizeRDI[0] == 1 && y->dimSizeRDI[0] == 1), "TODO!");
     CheckNTErrors((gold->order > leadDim && leadDim >= 0), "Illegal leading dimension!");
     CheckNTErrors((gold->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE), "TODO!");

@@ -332,7 +332,7 @@ void _CudaLossBackward(XTensor * dedy, XTensor * t, XTensor * y,
                        int leadDim, int tBeg, int tLen, int yBeg)
 {
     CheckNTErrors((tLen <= y->unitNum), "Illegal input length!");
-    CheckNTErrors((IsSameShaped(t, y) && IsSameShaped(dedy, y)),
+    CheckNTErrors((_IsSameShaped(t, y) && _IsSameShaped(dedy, y)),
                   "The input tensors must be of the same size!");
     CheckNTErrors(((dedy->devID == t->devID) && (dedy->devID == y->devID)),
                   "Tensor must be on the same device!");
source/tensor/function/Rectify.cpp

@@ -33,7 +33,7 @@ rectify function y = max(0, x)
 */
 void _Rectify(const XTensor * x, XTensor * y)
 {
-    CheckNTErrors(IsSameShaped(x, y),
+    CheckNTErrors(_IsSameShaped(x, y),
                   "The input tensor and output tensor must have the same shape!")
 #ifdef USE_CUDA

@@ -80,7 +80,7 @@ XTensor Rectify(const XTensor &x)
 void Rectify(const XTensor &x, XTensor &y)
 {
-    if (!y.isInit || !IsSameShaped(&y, &x)) {
+    if (!y.isInit || !IsSameShaped(y, x)) {
         InitTensor(&y, &x);
     }
source/tensor/function/Sigmoid.cpp

@@ -35,7 +35,7 @@ sigmoid function y = 1/(1+exp(-x))
 */
 void _Sigmoid(const XTensor * x, XTensor * y)
 {
-    CheckNTErrors(IsSameShaped(x, y),
+    CheckNTErrors(_IsSameShaped(x, y),
                   "The input tensor and output tensor must have the same shape!")
 #ifdef USE_CUDA

@@ -83,7 +83,7 @@ XTensor Sigmoid(const XTensor &x)
 void Sigmoid(const XTensor &x, XTensor &y)
 {
-    if (!y.isInit || !IsSameShaped(&y, &x)) {
+    if (!y.isInit || !IsSameShaped(y, x)) {
         InitTensor(&y, &x);
     }
source/tensor/function/Softmax.cpp

@@ -157,7 +157,7 @@ void Softmax(const XTensor &x, XTensor &y, int leadDim)
     if (ld < 0)
         ld = x.order - 1;
-    if (!y.isInit || !IsSameShaped(&y, &x)) {
+    if (!y.isInit || !IsSameShaped(y, x)) {
         InitTensor(&y, &x);
     }

@@ -253,7 +253,7 @@ void _SoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
         }
     }
     else {
-        CheckNTErrors((IsSameShaped(gold, y)), "The tensors must be of the same size!");
+        CheckNTErrors((_IsSameShaped(gold, y)), "The tensors must be of the same size!");
         for (int k = 0; k < blockNum; k++){
             gp = (DTYPE*)gold->data + k * blockSize;
             op = (DTYPE*)y->data + k * blockSize;

@@ -292,7 +292,7 @@ void _SoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
         }
     }
     else {
-        CheckNTErrors((IsSameShaped(gold, y)), "The tensors must be of the same size!");
+        CheckNTErrors((_IsSameShaped(gold, y)), "The tensors must be of the same size!");
         for (int k = 0; k < blockNum; k++){
             gp = (DTYPE*)gold->data + k * blockSize;
             op = (DTYPE*)y->data + k * blockSize;
source/tensor/function/Softmax.cu

@@ -224,7 +224,7 @@ void _CudaSoftmaxSumMax(const XTensor * x, XTensor * y, int leadDim, XTensor * s
 {
     CheckNTErrors((x->devID >= 0), "Forward computation of softmax must be run on GPUs.");
     CheckNTErrors((x->devID == y->devID), "Tensors used in softmax are not on the same GPU.");
-    CheckNTErrors((IsSameShaped(x, y)), "Input tensors must be of the same size!");
+    CheckNTErrors((_IsSameShaped(x, y)), "Input tensors must be of the same size!");
     int leadDimRDI = y->order - leadDim - 1;
     int dimensionSize = y->dimSizeRDI[leadDimRDI];
source/tensor/loss/CrossEntropy.cpp

@@ -55,10 +55,10 @@ void _CrossEntropy(const XTensor * output, const XTensor * gold,
     int unitNum = output->dimSize[n];
     CheckNTErrors(n >= 0 && n < output->order, "Wrong leadingDim!");
-    CheckNTErrors(IsSameShaped(output, gold),
+    CheckNTErrors(_IsSameShaped(output, gold),
                   "The output tensor and gold tensor must be of the same size!");
     CheckNTErrors(weight == NULL || weight->unitNum == unitNum, "Wrong weight tensor!");
-    CheckNTErrors(padding == NULL || IsSameShaped(padding, loss),
+    CheckNTErrors(padding == NULL || _IsSameShaped(padding, loss),
                   "The loss tensor and padding tensor must be same shape!");
     CheckNTErrors(loss->order == output->order - 1, "Wrong loss dimension!");
     CheckNTErrors(gold->dataType == DEFAULT_DTYPE && output->dataType == DEFAULT_DTYPE, "TODO!");

@@ -102,11 +102,11 @@ void _CrossEntropyFast(const XTensor * output, const XTensor * gold,
     CheckNTErrors(n >= 0 && n < output->order,
                   "Wrong leading dimension!");
-    CheckNTErrors(IsSameShaped(output, gold),
+    CheckNTErrors(_IsSameShaped(output, gold),
                   "The output tensor and gold tensor must be of the same size!");
     CheckNTErrors(weight == NULL || weight->unitNum == leadingDimSize,
                   "Wrong weight tensor!");
-    CheckNTErrors(padding == NULL || IsSameShaped(padding, loss),
+    CheckNTErrors(padding == NULL || _IsSameShaped(padding, loss),
                   "The loss tensor and padding tensor must be same shape!");
     CheckNTErrors(loss->order == output->order - 1,
                   "Wrong loss dimension!");

@@ -338,7 +338,7 @@ DTYPE _CrossEntropy(const XTensor * output, const XTensor * gold,
     int unitNum = output->dimSize[n];
     CheckNTErrors(n >= 0 && n < output->order, "Wrong leadingDim!");
-    CheckNTErrors(IsSameShaped(output, gold),
+    CheckNTErrors(_IsSameShaped(output, gold),
                   "The output tensor and gold tensor must be of the same size!");
     CheckNTErrors(weight == NULL || weight->unitNum == unitNum, "Wrong weight tensor!");
     CheckNTErrors(padding == NULL || padding->order == output->order - 1,

@@ -413,7 +413,7 @@ DTYPE _CrossEntropyFast(const XTensor * output, const XTensor * gold,
     CheckNTErrors(n >= 0 && n < output->order,
                   "Wrong leadingDim!");
-    CheckNTErrors(IsSameShaped(output, gold),
+    CheckNTErrors(_IsSameShaped(output, gold),
                   "The output tensor and gold tensor must be of the same size!");
     CheckNTErrors(weight == NULL || weight->unitNum == leadingDimSize,
                   "Wrong weight tensor!");

@@ -565,7 +565,7 @@ void _CrossEntropyBackward(XTensor * dedy, const XTensor * output,
     CheckNTErrors(n >= 0 && n < output->order,
                   "Wrong leading dimension!");
-    CheckNTErrors(IsSameShaped(dedy, output, gold),
+    CheckNTErrors(_IsSameShaped(dedy, output, gold),
                   "The output tensor and gold tensor must be of the same size!");
     CheckNTErrors(weight == NULL || weight->unitNum == leadingDimSize,
                   "Wrong weight tensor!");
source/tensor/loss/CrossEntropy.cu

@@ -101,7 +101,7 @@ DTYPE _CudaCrossEntropyFast(const XTensor * output, const XTensor * gold,
     CheckNTErrors(n >= 0 && n < output->order,
                   "Wrong leadingDim!");
-    CheckNTErrors(IsSameShaped(output, gold),
+    CheckNTErrors(_IsSameShaped(output, gold),
                   "The output tensor and gold tensor must be of the same size!");
     CheckNTErrors(weight == NULL || weight->unitNum == leadingDimSize,
                   "Wrong weight tensor!");