NiuTrans.Tensor

Commit 7ae1562d
authored Jul 20, 2018 by xiaotong
replace XTensor::IsIdentical with XTensor::IsSameShaped
parent 37b7e09b
Showing 36 changed files with 64 additions and 62 deletions.
source/network/XBackwardMath.cpp                        +1  -1
source/network/XBackwardShape.cpp                       +1  -1
source/network/XNoder.cpp                               +1  -1
source/tensor/XTensor.cpp                               +3  -3
source/tensor/XTensor.h                                 +2  -2
source/tensor/core/arithmetic/Absolute.cpp              +1  -1
source/tensor/core/arithmetic/Absolute.cu               +1  -1
source/tensor/core/arithmetic/MatrixMULBatchedCPU.cpp   +3  -3
source/tensor/core/arithmetic/Negate.cpp                +1  -1
source/tensor/core/arithmetic/Negate.cu                 +1  -1
source/tensor/core/arithmetic/Sign.cpp                  +1  -1
source/tensor/core/arithmetic/Sign.cu                   +1  -1
source/tensor/core/arithmetic/SumByColumnTV.cpp         +1  -1
source/tensor/core/arithmetic/SumByColumnTV.cu          +1  -1
source/tensor/core/arithmetic/SumByColumnVT.cpp         +1  -1
source/tensor/core/arithmetic/SumByColumnVT.cu          +1  -1
source/tensor/core/arithmetic/XTensorBLAS.cu            +3  -3
source/tensor/core/math/Log.cpp                         +1  -1
source/tensor/core/math/Log.cu                          +1  -1
source/tensor/core/math/Normalize.cpp                   +3  -3
source/tensor/core/math/Power.cu                        +1  -1
source/tensor/core/movement/CopyInGrid.cpp              +1  -1
source/tensor/core/reduce/ReduceSum.cpp                 +1  -1
source/tensor/core/shape/Concatenate.cpp                +3  -3
source/tensor/core/shape/Merge.cpp                      +1  -1
source/tensor/core/sort/Sort.cpp                        +1  -1
source/tensor/function/HardTanH.cpp                     +1  -1
source/tensor/function/Identity.cpp                     +1  -1
source/tensor/function/LogSoftmax.cpp                   +2  -2
source/tensor/function/LogSoftmax.cu                    +1  -1
source/tensor/function/Loss.cpp                         +6  -5
source/tensor/function/Loss.cu                          +10 -9
source/tensor/function/Rectify.cpp                      +1  -1
source/tensor/function/Sigmoid.cpp                      +2  -2
source/tensor/function/Softmax.cpp                      +2  -2
source/tensor/function/Softmax.cu                       +1  -1
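
The change itself is purely mechanical: every call site keeps its arguments and error message, and only the method name changes. A representative call site, taken from the hunks below, before and after the commit:

// before this commit
CheckNTErrors((XTensor::IsIdentical(a, b)), "Input tensors should have the same type!");

// after this commit
CheckNTErrors((XTensor::IsSameShaped(a, b)), "Input tensors should have the same type!");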

source/network/XBackwardMath.cpp
@@ -96,7 +96,7 @@ void XMathGrad::GradMultiply(XTensor * node)
     XNoder::MakeGrad(a);
     XNoder::MakeGrad(b);
-    CheckNTErrors(XTensor::IsIdentical(a, b), "Wrong sized input tensors!");
+    CheckNTErrors(XTensor::IsSameShaped(a, b), "Wrong sized input tensors!");
     _Multiply(node->grad, b, a->grad, 1.0F);
     _Multiply(node->grad, a, b->grad, 1.0F);
 }

source/network/XBackwardShape.cpp
@@ -164,7 +164,7 @@ void XShapeGrad::GradMergeList(XTensor * node)
             smallsGrad.Add(tail->grad);
             if(i > 1){
-                CheckNTErrors(XTensor::IsIdentical(last, tail),
+                CheckNTErrors(XTensor::IsSameShaped(last, tail),
                               "Input tensors must be of the same size!");
             }

source/network/XNoder.cpp
@@ -29,7 +29,7 @@ void XNoder::MakeGrad(XTensor * node)
     if(node == NULL)
         return;
-    if(!XTensor::IsIdentical(node, node->grad)){
+    if(!XTensor::IsSameShaped(node, node->grad)){
         delete node->grad;
         node->grad = NewTensor(node);
         node->grad->SetZeroAll();

source/tensor/XTensor.cpp
@@ -370,7 +370,7 @@ judge whether the two matrices are in the same type and size
 >> b - anther tensor to compare with
 << return - whether the two input tensors are identical
 */
-bool XTensor::IsIdentical(const XTensor * a, const XTensor * b)
+bool XTensor::IsSameShaped(const XTensor * a, const XTensor * b)
 {
     if(a == NULL || b == NULL)
         return false;
@@ -402,9 +402,9 @@ judge whether the three matrices are in the same type and size
 >> c - a tensor again
 << return - whether the two input tensors are identical
 */
-bool XTensor::IsIdentical(XTensor * a, XTensor * b, XTensor * c)
+bool XTensor::IsSameShaped(XTensor * a, XTensor * b, XTensor * c)
 {
-    return IsIdentical(a, b) && IsIdentical(a, c);
+    return IsSameShaped(a, b) && IsSameShaped(a, c);
 }
 /*
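
The bodies of the renamed functions are collapsed in this diff beyond the NULL check, so the following is only a rough sketch of what a "same type and size" comparison of this kind usually does (the helper name and the exact fields checked are assumptions, not code from this commit): compare the order, the size of every dimension, and the data type.

/* sketch only: the real XTensor::IsSameShaped body is not shown in this diff,
   so the exact checks below are assumptions */
bool IsSameShapedSketch(const XTensor * a, const XTensor * b)
{
    if(a == NULL || b == NULL)
        return false;
    if(a->order != b->order)                 /* same number of dimensions */
        return false;
    for(int i = 0; i < a->order; i++){
        if(a->dimSize[i] != b->dimSize[i])   /* same size in every dimension */
            return false;
    }
    return a->dataType == b->dataType;       /* same element type */
}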

source/tensor/XTensor.h
@@ -207,11 +207,11 @@ public:
     /* judge whether the two matrices are in the same type and size */
     static
-    bool IsIdentical(const XTensor * a, const XTensor * b);
+    bool IsSameShaped(const XTensor * a, const XTensor * b);
     /* judge whether the three matrices are in the same type and size */
     static
-    bool IsIdentical(XTensor * a, XTensor * b, XTensor * c);
+    bool IsSameShaped(XTensor * a, XTensor * b, XTensor * c);
     /* set the size of each dimension */
     void SetDim(int * myDimSize);
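
Since both overloads are static, call sites qualify them with the class name, and the three-tensor overload (defined in XTensor.cpp above) simply chains two pairwise checks. A short usage sketch based on the declarations shown here; the tensors x1, x2 and x3 are hypothetical:

XTensor x1, x2, x3;   /* hypothetical tensors, assumed to be initialized elsewhere */

if(XTensor::IsSameShaped(&x1, &x2)){
    /* x1 and x2 have the same type and size */
}

CheckNTErrors(XTensor::IsSameShaped(&x1, &x2, &x3),   /* IsSameShaped(x1, x2) && IsSameShaped(x1, x3) */
              "The tensors must be of the same size!");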

source/tensor/core/arithmetic/Absolute.cpp
@@ -42,7 +42,7 @@ void _Absolute(const XTensor * a, XTensor * b)
     }
 #endif
-    CheckNTErrors((XTensor::IsIdentical(a, b)), "Input tensors should have the same type!");
+    CheckNTErrors((XTensor::IsSameShaped(a, b)), "Input tensors should have the same type!");
     CheckNTErrors((a->dataType == DEFAULT_DTYPE), "TODO!");
     DTYPE * d = (DTYPE*)a->data;
     DTYPE * db = (DTYPE*)b->data;

source/tensor/core/arithmetic/Absolute.cu
@@ -63,7 +63,7 @@ set each entry to its absolute value
 extern "C"
 void _CudaAbsolute(const XTensor * a, XTensor * b)
 {
-    CheckNTErrors((XTensor::IsIdentical(a, b)), "Input tensors should have the same type!");
+    CheckNTErrors((XTensor::IsSameShaped(a, b)), "Input tensors should have the same type!");
     CheckNTErrors((a->isSparse == false), "TODO!");
     int gridSize[3];

source/tensor/core/arithmetic/MatrixMULBatchedCPU.cpp
@@ -55,9 +55,9 @@ void _MatrixMULBatchedCPU(const XList * a, MATRIX_TRANS_TYPE transposedA,
         XTensor * ai = (XTensor*)a->GetItem(i);
         XTensor * bi = (XTensor*)b->GetItem(i);
         XTensor * ci = (XTensor*)c->GetItem(i);
-        if(!XTensor::IsIdentical(aim, ai) ||
-           !XTensor::IsIdentical(bim, bi) ||
-           !XTensor::IsIdentical(cim, ci))
+        if(!XTensor::IsSameShaped(aim, ai) ||
+           !XTensor::IsSameShaped(bim, bi) ||
+           !XTensor::IsSameShaped(cim, ci))
         {
             isUniform = false;
             break;

source/tensor/core/arithmetic/Negate.cpp
@@ -41,7 +41,7 @@ void _Negate(const XTensor * a, XTensor * b)
     }
 #endif
-    CheckNTErrors((XTensor::IsIdentical(a, b)), "Input tensors should have the same type!");
+    CheckNTErrors((XTensor::IsSameShaped(a, b)), "Input tensors should have the same type!");
     CheckNTErrors((a->dataType == DEFAULT_DTYPE), "TODO!");
     DTYPE * d = (DTYPE*)a->data;
     DTYPE * db = (DTYPE*)b->data;

source/tensor/core/arithmetic/Negate.cu
@@ -71,7 +71,7 @@ set each entry to its negtive value
 extern "C"
 void _CudaNegate(const XTensor * a, XTensor * b)
 {
-    CheckNTErrors((XTensor::IsIdentical(a, b)), "Input tensors should have the same type!");
+    CheckNTErrors((XTensor::IsSameShaped(a, b)), "Input tensors should have the same type!");
     CheckNTErrors((a->isSparse == false), "TODO!");
     int gridSize[3];

source/tensor/core/arithmetic/Sign.cpp
@@ -41,7 +41,7 @@ void _Sign(const XTensor * a, XTensor * b)
     }
 #endif
-    CheckNTErrors((XTensor::IsIdentical(a, b)), "Input tensors should have the same type!");
+    CheckNTErrors((XTensor::IsSameShaped(a, b)), "Input tensors should have the same type!");
     CheckNTErrors((a->dataType == DEFAULT_DTYPE), "TODO!");
     DTYPE * d = (DTYPE*)a->data;
     DTYPE * db = (DTYPE*)b->data;

source/tensor/core/arithmetic/Sign.cu
@@ -69,7 +69,7 @@ set each entry to its sign value
 extern "C"
 void _CudaSign(const XTensor * a, XTensor * b)
 {
-    CheckNTErrors((XTensor::IsIdentical(a, b)), "Input tensors should have the same type!");
+    CheckNTErrors((XTensor::IsSameShaped(a, b)), "Input tensors should have the same type!");
     CheckNTErrors((a->isSparse == false), "TODO!");
     int gridSize[3];

source/tensor/core/arithmetic/SumByColumnTV.cpp
@@ -40,7 +40,7 @@ where b is a vector.
 void _SumByColumnTV(const XTensor * a, const XTensor * b, XTensor * c, DTYPE beta)
 {
     CheckNTErrors((a && b && c), "Empty input tensors!");
-    CheckNTErrors((XTensor::IsIdentical(a, c)), "Unmatched tensors in addition!");
+    CheckNTErrors((XTensor::IsSameShaped(a, c)), "Unmatched tensors in addition!");
     CheckNTErrors((b->order == 2 && b->dimSizeRDI[0] == 1 && b->dimSizeRDI[1] == a->dimSizeRDI[1]),
                   "Illegal input vector size!");

source/tensor/core/arithmetic/SumByColumnTV.cu
@@ -67,7 +67,7 @@ where b is a vector.
 void _CudaSumByColumnTV(const XTensor * a, const XTensor * b, XTensor * c, DTYPE beta)
 {
     CheckNTErrors((a && b && c), "Empty input tensors!");
-    CheckNTErrors((XTensor::IsIdentical(a, c)), "Unmatched tensors in addition!");
+    CheckNTErrors((XTensor::IsSameShaped(a, c)), "Unmatched tensors in addition!");
     CheckNTErrors((b->order == 2 && b->dimSizeRDI[0] == 1 && b->dimSizeRDI[1] == a->dimSizeRDI[1]),
                   "Illegal input vector size!");
     CheckNTErrors((a->dataType == DEFAULT_DTYPE && b->dataType == DEFAULT_DTYPE &&

source/tensor/core/arithmetic/SumByColumnVT.cpp
@@ -40,7 +40,7 @@ where c and a are vectors, and b_col is a column in b.
 void _SumByColumnVT(const XTensor * a, const XTensor * b, XTensor * c, DTYPE beta)
 {
     CheckNTErrors((a && b && c), "Empty input tensors!");
-    CheckNTErrors((XTensor::IsIdentical(a, c)), "Unmatched tensors in addition!");
+    CheckNTErrors((XTensor::IsSameShaped(a, c)), "Unmatched tensors in addition!");
     CheckNTErrors((a->order == 2 && a->dimSizeRDI[0] == 1 && b->dimSizeRDI[1] == a->dimSizeRDI[1]),
                   "Illegal input vector size!");

source/tensor/core/arithmetic/SumByColumnVT.cu
@@ -83,7 +83,7 @@ where c and a are vectors, and b_col is a column in b.
 void _CudaSumByColumnVT(const XTensor * a, const XTensor * b, XTensor * c, DTYPE beta)
 {
     CheckNTErrors((a && b && c), "Empty input tensors!");
-    CheckNTErrors((XTensor::IsIdentical(a, c)), "Unmatched tensors in addition!");
+    CheckNTErrors((XTensor::IsSameShaped(a, c)), "Unmatched tensors in addition!");
     CheckNTErrors((a->order == 2 && a->dimSizeRDI[0] == 1 && b->dimSizeRDI[1] == a->dimSizeRDI[1]),
                   "Illegal input vector size!");
     CheckNTErrors((a->dataType == DEFAULT_DTYPE && b->dataType == DEFAULT_DTYPE &&

source/tensor/core/arithmetic/XTensorBLAS.cu
@@ -225,9 +225,9 @@ void _CudaBLASMatrixMULList(cublasHandle_t * handle,
         XTensor * ai = (XTensor*)a->GetItem(i);
         XTensor * bi = (XTensor*)b->GetItem(i);
         XTensor * ci = (XTensor*)c->GetItem(i);
-        if (!XTensor::IsIdentical(aim, ai) ||
-            !XTensor::IsIdentical(bim, bi) ||
-            !XTensor::IsIdentical(cim, ci))
+        if (!XTensor::IsSameShaped(aim, ai) ||
+            !XTensor::IsSameShaped(bim, bi) ||
+            !XTensor::IsSameShaped(cim, ci))
         {
             isUniform = false;
             break;

source/tensor/core/math/Log.cpp
@@ -42,7 +42,7 @@ void _Log(const XTensor * a, XTensor * b)
     }
 #endif
-    CheckNTErrors((XTensor::IsIdentical(a, b)), "Input tensors should have the same type!");
+    CheckNTErrors((XTensor::IsSameShaped(a, b)), "Input tensors should have the same type!");
     CheckNTErrors((a->dataType == DEFAULT_DTYPE), "TODO!");
     DTYPE * d = (DTYPE*)a->data;
     DTYPE * db = (DTYPE*)b->data;

source/tensor/core/math/Log.cu
@@ -63,7 +63,7 @@ set each entry to its log value
 extern "C"
 void _CudaLog(const XTensor * a, XTensor * b)
 {
-    CheckNTErrors((XTensor::IsIdentical(a, b)), "Input tensors should have the same type!");
+    CheckNTErrors((XTensor::IsSameShaped(a, b)), "Input tensors should have the same type!");
     CheckNTErrors((a->isSparse == false), "TODO!");
     int gridSize[3];

source/tensor/core/math/Normalize.cpp
@@ -45,9 +45,9 @@ where a and b are the scalar and bias respectively, and \epsilon is the adjustme
 void _Normalize(const XTensor * input, XTensor * output, int dim, const XTensor * mean, const XTensor * var, const XTensor * a, const XTensor * b, DTYPE epsilon)
 {
     int dimRDI = input->order - dim - 1;
-    CheckNTErrors((XTensor::IsIdentical(input, output)), "Unmatched input tensors!");
-    CheckNTErrors((XTensor::IsIdentical(a, b)), "Unmatched input tensors");
-    CheckNTErrors((XTensor::IsIdentical(mean, var)), "Unmatched input tensors");
+    CheckNTErrors((XTensor::IsSameShaped(input, output)), "Unmatched input tensors!");
+    CheckNTErrors((XTensor::IsSameShaped(a, b)), "Unmatched input tensors");
+    CheckNTErrors((XTensor::IsSameShaped(mean, var)), "Unmatched input tensors");
     CheckNTErrors((input && output && mean && var && a && b), "Empty input tensors!");
     CheckNTErrors((dimRDI >= 0 && dimRDI < input->order), "Incorrect reduction dimension!");
     CheckNTErrors((dimRDI == a->order - 1), "Incorrect reduction dimension!");

source/tensor/core/math/Power.cu
@@ -103,7 +103,7 @@ void KernelPower(__half * a, __half * b, __half p, int size)
 extern "C"
 void _CudaPower(const XTensor * a, XTensor * b, DTYPE p)
 {
-    CheckNTErrors((XTensor::IsIdentical(a, b)), "Input tensors should have the same type!");
+    CheckNTErrors((XTensor::IsSameShaped(a, b)), "Input tensors should have the same type!");
     int gridSize[3];
     int blockSize[3];

source/tensor/core/movement/CopyInGrid.cpp
@@ -38,7 +38,7 @@ in the k-th grid
 */
 void _CopyInGrid(const XTensor * s, XTensor * t, int * index, int blockDim, int blockNumInGrid, bool isIndexOnDev)
 {
-    CheckNTErrors((XTensor::IsIdentical(s, t)), "Unmatched tensors!");
+    CheckNTErrors((XTensor::IsSameShaped(s, t)), "Unmatched tensors!");
     int blockDimRDI = s->order - blockDim - 1;
     int blockSize = 1;

source/tensor/core/reduce/ReduceSum.cpp
@@ -48,7 +48,7 @@ void _ReduceSum(const XTensor * input, XTensor * output, int dim, const XTensor
     CheckNTErrors((input->order == output->order + 1), "Incorrect tensor sizes!");
     CheckNTErrors((input->order > dim && dim >= 0), "Illegal dimension to reduce!");
     CheckNTErrors((input->dataType == output->dataType), "Unmatched data types!");
-    CheckNTErrors((shift == NULL || XTensor::IsIdentical(output, shift)), "Incorrect shift tensor size!");
+    CheckNTErrors((shift == NULL || XTensor::IsSameShaped(output, shift)), "Incorrect shift tensor size!");
     int dimRDI = input->order - dim - 1;
     for(int i = 0; i < input->order; i++){

source/tensor/core/shape/Concatenate.cpp
@@ -44,7 +44,7 @@ void _Concatenate(const XList * smalls, XTensor * big, int dim)
         XTensor * a = (XTensor*)smalls->GetItem(i - 1);
         XTensor * b = (XTensor*)smalls->GetItem(i);
         CheckNTErrors((a && b), "Empty input tensors!");
-        if(!XTensor::IsIdentical(a, b))
+        if(!XTensor::IsSameShaped(a, b))
             uniform = false;
     }
@@ -76,7 +76,7 @@ XTensor Concatenate(const XList &smalls, int dim)
         XTensor * a = (XTensor*)smalls.GetItem(i - 1);
         XTensor * b = (XTensor*)smalls.GetItem(i);
         CheckNTErrors((a && b), "Empty input tensors!");
-        if(!XTensor::IsIdentical(a, b))
+        if(!XTensor::IsSameShaped(a, b))
             uniform = false;
     }
     XTensor * tensor = (XTensor*)smalls.GetItem(0);
@@ -177,7 +177,7 @@ XTensor Concatenate(const XTensor &smallA, const XTensor &smallB, int dim)
         XTensor * a = (XTensor*)smalls.Get(i - 1);
         XTensor * b = (XTensor*)smalls.Get(i);
         CheckNTErrors((a && b), "Empty input tensors!");
-        if(!XTensor::IsIdentical(a, b))
+        if(!XTensor::IsSameShaped(a, b))
             uniform = false;
     }
     XTensor * tensor = (XTensor*)smalls.Get(0);

source/tensor/core/shape/Merge.cpp
@@ -356,7 +356,7 @@ merge two tensors into a big tensor (return a XTensor structure)
 */
 XTensor Merge(const XTensor &smallA, const XTensor &smallB, int whereToMerge)
 {
-    CheckNTErrors(XTensor::IsIdentical(&smallA, &smallB),
+    CheckNTErrors(XTensor::IsSameShaped(&smallA, &smallB),
                   "The two tensors must be of the same size!");
     int order = smallA.order;

source/tensor/core/sort/Sort.cpp
@@ -36,7 +36,7 @@ sort the tensor along a given dimension
 */
 void _Sort(const XTensor * a, XTensor * b, XTensor * index, int dim)
 {
-    CheckNTErrors((XTensor::IsIdentical(a, b)), "Input tensors should have the same type!");
+    CheckNTErrors((XTensor::IsSameShaped(a, b)), "Input tensors should have the same type!");
     CheckNTErrors((dim >= 0 && dim < a->order), "Incorrect dimension specified!");
     CheckNTErrors((a->order == index->order), "Unmatched input tensors!");
     CheckNTErrors((index->dataType == X_INT), "Wrong data type!");

source/tensor/function/HardTanH.cpp
@@ -106,7 +106,7 @@ void _HardTanHBackward(XTensor * gold, XTensor * y, XTensor * x,
                        XTensor * dedy, XTensor * dedx,
                        LOSS_FUNCTION_NAME lossName)
 {
-    CheckNTErrors((gold == NULL || XTensor::IsIdentical(gold, y)),
+    CheckNTErrors((gold == NULL || XTensor::IsSameShaped(gold, y)),
                   "The tensors must be of the same size!");
 #ifdef USE_CUDA

source/tensor/function/Identity.cpp
@@ -72,7 +72,7 @@ void _IdentityBackward(XTensor * gold, XTensor * y, XTensor * x,
                        XTensor * dedy, XTensor * dedx,
                        LOSS_FUNCTION_NAME lossName)
 {
-    CheckNTErrors((gold == NULL || XTensor::IsIdentical(gold, y)),
+    CheckNTErrors((gold == NULL || XTensor::IsSameShaped(gold, y)),
                   "The tensors must be of the same size!");
     if(x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE)

source/tensor/function/LogSoftmax.cpp
@@ -309,7 +309,7 @@ void _LogSoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
             }
         }
         else{
-            CheckNTErrors((XTensor::IsIdentical(gold, y)), "The tensors must be of the same size!");
+            CheckNTErrors((XTensor::IsSameShaped(gold, y)), "The tensors must be of the same size!");
             for(int k = 0; k < blockNum; k++){
                 gp = (DTYPE*)gold->data + k * blockSize;
                 op = (DTYPE*)y->data + k * blockSize;
@@ -363,7 +363,7 @@ void _LogSoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
             }
         }
         else{
-            CheckNTErrors((XTensor::IsIdentical(gold, y)), "The tensors must be of the same size!");
+            CheckNTErrors((XTensor::IsSameShaped(gold, y)), "The tensors must be of the same size!");
             for(int k = 0; k < blockNum; k++){
                 gp = (DTYPE*)gold->data + k * blockSize;
                 op = (DTYPE*)y->data + k * blockSize;

source/tensor/function/LogSoftmax.cu
@@ -409,7 +409,7 @@ void _CudaLogSoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
                              dedx->dimSize[0], dedx->dimSize[1], gold->unitNumNonZero, lossName);
     }
     else {
-        CheckNTErrors((XTensor::IsIdentical(gold, y)), "The tensors must be of the same size!");
+        CheckNTErrors((XTensor::IsSameShaped(gold, y)), "The tensors must be of the same size!");
         for (int k = 0; k < blockNum; k++) {
             GDevs.GetCudaThread(x->devID, blockSize, cudaGridSize, cudaBlockSize);

source/tensor/function/Loss.cpp
@@ -48,7 +48,7 @@ DTYPE _LossCompute(XTensor * gold, XTensor * output, LOSS_FUNCTION_NAME LFName,
     DTYPE error = 0.0F;
     if (output->devID < 0) {
         CheckNTErrors((gLen >= 0 && gLen <= output->unitNum), "Illegal input length!");
-        CheckNTErrors((XTensor::IsIdentical(gold, output)), "The input tensors must be of the same size!");
+        CheckNTErrors((XTensor::IsSameShaped(gold, output)), "The input tensors must be of the same size!");
         CheckNTErrors((gold->dimSizeRDI[0] == 1 && output->dimSizeRDI[0] == 1), "TODO!");
         CheckNTErrors((gold->order > leadDim && leadDim >= 0), "Illegal leading dimension!");
         CheckNTErrors((gold->dataType == DEFAULT_DTYPE && output->dataType == DEFAULT_DTYPE),
@@ -206,7 +206,7 @@ DTYPE _LossComputeForLogScale(XTensor * gold, XTensor * output,
                               int leadDim, int gBeg, int gLen, int oBeg)
 {
     CheckNTErrors(gLen >= 0 && gLen <= output->unitNum, "Illegal input length!");
-    CheckNTErrors(XTensor::IsIdentical(gold, output), "The input tensors must be of the same size!");
+    CheckNTErrors(XTensor::IsSameShaped(gold, output), "The input tensors must be of the same size!");
     CheckNTErrors(gold->dimSizeRDI[0] == 1 && output->dimSizeRDI[0] == 1, "TODO!");
     CheckNTErrors(gold->order > leadDim && leadDim >= 0, "Illegal leading dimension!");
     CheckNTErrors(gold->dataType == DEFAULT_DTYPE && output->dataType == DEFAULT_DTYPE, "TODO!");
@@ -402,9 +402,10 @@ void _LossBackward(XTensor * dedy, XTensor * t, XTensor * y,
     if (y->devID < 0) {
         CheckNTErrors(tLen <= y->unitNum, "Illegal input length!");
-        CheckNTErrors(XTensor::IsIdentical(t, y) && XTensor::IsIdentical(dedy, y),
+        CheckNTErrors(XTensor::IsSameShaped(t, y) && XTensor::IsSameShaped(dedy, y),
                       "The input tensors must be of the same size!");
         CheckNTErrors((dedy->devID == t->devID) && (dedy->devID == y->devID), "Tensor must be on the same device!");
         CheckNTErrors(t->order > leadDim, "Illegal leading dimension!");
         CheckNTErrors(t->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE, "TODO!");

source/tensor/function/Loss.cu
@@ -55,7 +55,7 @@ DTYPE _CudaLossCompute(XTensor * gold, XTensor * y, LOSS_FUNCTION_NAME LFName,
                        bool isLogOutput, int leadDim, int gBeg, int gLen, int yBeg)
 {
     CheckNTErrors((gLen >= 0 && gLen <= y->unitNum), "Illegal input length!");
-    CheckNTErrors((XTensor::IsIdentical(gold, y)), "The input tensors must be of the same size!");
+    CheckNTErrors((XTensor::IsSameShaped(gold, y)), "The input tensors must be of the same size!");
     CheckNTErrors((gold->dimSizeRDI[0] == 1 && y->dimSizeRDI[0] == 1), "TODO!");
     CheckNTErrors((gold->order > leadDim && leadDim >= 0), "Illegal leading dimension!");
     CheckNTErrors((gold->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE),
@@ -333,20 +333,21 @@ void _CudaLossBackward(XTensor * dedy, XTensor * t, XTensor * y,
                        int leadDim, int tBeg, int tLen, int yBeg)
 {
     CheckNTErrors((tLen <= y->unitNum), "Illegal input length!");
-    CheckNTErrors((XTensor::IsIdentical(t, y)&& XTensor::IsIdentical(dedy, y)),
+    CheckNTErrors((XTensor::IsSameShaped(t, y)&& XTensor::IsSameShaped(dedy, y)),
                   "The input tensors must be of the same size!");
-    CheckNTErrors(((dedy->devID == t->devID) && (dedy->devID == y->devID)), "Tensor must be on the same device!");
+    CheckNTErrors(((dedy->devID == t->devID) && (dedy->devID == y->devID)),
+                  "Tensor must be on the same device!");
     CheckNTErrors((t->order > leadDim), "Illegal leading dimension!");
     CheckNTErrors((t->dataType == DEFAULT_DTYPE &&
                    y->dataType == DEFAULT_DTYPE &&
                    dedy->dataType == DEFAULT_DTYPE),
                   "Input vectors are not in default type.");
     CheckNTErrors((dedy->devID >= 0 && t->devID >= 0 && y->devID >= 0),
                   "The backward compuation must be performed on GPUs.");
     CheckNTErrors((dedy->devID == t->devID && dedy->devID == y->devID),
                   "The vectors must be on the same GPU.");
     CheckNTErrors((tBeg == yBeg), "TODO!");
     int leadDimRDI = leadDim >= 0 ? y->order - leadDim - 1 : -1;

source/tensor/function/Rectify.cpp
@@ -103,7 +103,7 @@ void _RectifyBackward(XTensor * gold, XTensor * y, XTensor * x,
                       XTensor * dedy, XTensor * dedx,
                       LOSS_FUNCTION_NAME lossName)
 {
-    CheckNTErrors((gold == NULL || XTensor::IsIdentical(gold, y)),
+    CheckNTErrors((gold == NULL || XTensor::IsSameShaped(gold, y)),
                   "The tensors must be of the same size!");
 #ifdef USE_CUDA

source/tensor/function/Sigmoid.cpp
@@ -94,8 +94,8 @@ void _SigmoidBackward(XTensor * gold, XTensor * y, XTensor * x,
                       XTensor * dedy, XTensor * dedx,
                       LOSS_FUNCTION_NAME lossName)
 {
-    CheckNTErrors((gold == NULL || XTensor::IsIdentical(gold, y)),
-                  "The tensors must be of the same size!");
+    CheckNTErrors((gold == NULL || XTensor::IsSameShaped(gold, y)),
+                  "The tensors must be of the same size!");
 #ifdef USE_CUDA
     if(x->devID >= 0 || y->devID >= 0){

source/tensor/function/Softmax.cpp
@@ -230,7 +230,7 @@ void _SoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
             }
         }
         else{
-            CheckNTErrors((XTensor::IsIdentical(gold, y)), "The tensors must be of the same size!");
+            CheckNTErrors((XTensor::IsSameShaped(gold, y)), "The tensors must be of the same size!");
             for(int k = 0; k < blockNum; k++){
                 gp = (DTYPE*)gold->data + k * blockSize;
                 op = (DTYPE*)y->data + k * blockSize;
@@ -269,7 +269,7 @@ void _SoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
             }
         }
         else{
-            CheckNTErrors((XTensor::IsIdentical(gold, y)), "The tensors must be of the same size!");
+            CheckNTErrors((XTensor::IsSameShaped(gold, y)), "The tensors must be of the same size!");
             for(int k = 0; k < blockNum; k++){
                 gp = (DTYPE*)gold->data + k * blockSize;
                 op = (DTYPE*)y->data + k * blockSize;

source/tensor/function/Softmax.cu
@@ -167,7 +167,7 @@ void _CudaSoftmaxSumMax(const XTensor * x, XTensor * y, int leadDim, XTensor * s
 {
     CheckNTErrors((x->devID >= 0), "Forward computation of softmax must be run on GPUs.");
     CheckNTErrors((x->devID == y->devID), "Tensors used in softmax are not on the same GPU.");
-    CheckNTErrors((XTensor::IsIdentical(x, y)), "Input tensors must be of the same size!");
+    CheckNTErrors((XTensor::IsSameShaped(x, y)), "Input tensors must be of the same size!");
     int leadDimRDI = y->order - leadDim - 1;
     int dimensionSize = y->dimSizeRDI[leadDimRDI];