NiuTrans / NiuTrans.Tensor / Commits / 003def3d

Commit 003def3d, authored Jul 11, 2018 by xuchen (parent: f12ced64)

1. Update test cases
2. Bug fixed in MatrixMulBatched function
3. Bug fixed in SumByColumnVT function
Showing 15 changed files with 698 additions and 745 deletions.
source/core/arithmetic/MatrixMulBatched.cpp   +3    -3
source/core/arithmetic/SumByColumnVT.cu       +1    -1
source/test/TConcatenate.cpp                  +3    -3
source/test/TCopyIndexed.cpp                  +227  -1
source/test/THardTanH.cpp                     +97   -389
source/test/TIdentity.cpp                     +61   -50
source/test/TLogSoftmax.cpp                   +84   -80
source/test/TLoss.cpp                         +6    -7
source/test/TMatrixMulBatched.cpp             +1    -0
source/test/TRectify.cpp                      +66   -68
source/test/TSetAscendingOrder.cpp            +1    -3
source/test/TSetData.cpp                      +5    -2
source/test/TSigmoid.cpp                      +65   -83
source/test/TSoftmax.cpp                      +53   -47
source/test/TSplit.cpp                        +25   -8
source/core/arithmetic/MatrixMulBatched.cpp

@@ -89,9 +89,9 @@ void MatrixMulBatched(XTensor * a, MATRIX_TRANS_TYPE transposedA,
     void * ap = (char*)a->data + aRealBlockSize * p;
     void * bp = (char*)b->data + bRealBlockSize * p;
     void * cp = (char*)c->data + cRealBlockSize * p;
-    XTensor * ai = new XTensor(2, aDimSize, a->dataType, a->denseRatio, a->mem);
-    XTensor * bi = new XTensor(2, bDimSize, b->dataType, b->denseRatio, b->mem);
-    XTensor * ci = new XTensor(2, cDimSize, c->dataType, c->denseRatio, c->mem);
+    XTensor * ai = NewTensor(2, aDimSize, a->dataType, a->denseRatio, a->devID, a->mem);
+    XTensor * bi = NewTensor(2, bDimSize, b->dataType, b->denseRatio, a->devID, b->mem);
+    XTensor * ci = NewTensor(2, cDimSize, c->dataType, c->denseRatio, a->devID, c->mem);
     ai->data = ap;
     bi->data = bp;
     ci->data = cp;
...
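The change above swaps the raw new XTensor(...) constructions for NewTensor(...) calls that also receive a->devID, while the per-batch pointer arithmetic for ap/bp/cp stays untouched. That slicing pattern is easy to reproduce outside the library; below is a minimal, self-contained sketch of it in plain C++ (made-up sizes, not the NiuTrans.Tensor code itself):

    #include <cstdio>

    // Sketch: carve per-batch views out of one contiguous buffer, as the
    // ap/bp/cp lines above do.  batchNum and blockSize are made-up values.
    int main() {
        const int batchNum = 3, blockSize = 4;           // 3 blocks of 4 floats
        float data[batchNum * blockSize] = {0};

        for (int p = 0; p < batchNum; p++) {
            // same arithmetic as "(char*)a->data + aRealBlockSize * p",
            // written for a typed array instead of a raw byte pointer
            float * ap = data + blockSize * p;
            ap[0] = (float)p;                            // touch the view
        }
        printf("%g %g %g\n", data[0], data[4], data[8]); // prints 0 1 2
        return 0;
    }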
source/core/arithmetic/SumByColumnVT.cu

@@ -52,7 +52,7 @@ void KernelADDByColumnVT(DTYPE * a, DTYPE * b, DTYPE * c, int colNum, int rowNum
     DTYPE * bp = b + (rowNum * k + row) * colNum;
     if (colNum % 4 == 0) {
         for (int i = 0; i < colNum; i += 4)
-            sum += bp[i] + bp[i + 1] + b[i + 2] + b[i + 3];
+            sum += bp[i] + bp[i + 1] + bp[i + 2] + bp[i + 3];
     }
     else if (colNum % 2 == 0) {
         for (int i = 0; i < colNum; i += 2)
...
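The fix makes all four terms of the unrolled inner loop read through the row pointer bp; the old code read the last two terms through the base pointer b, i.e. always from the first row. A standalone sketch of the corrected unrolling (plain C++ with hypothetical data, not the CUDA kernel itself):

    #include <cstdio>

    // Sketch of the corrected 4-way unrolled row sum.  The bug fixed above
    // was using the base pointer b instead of the row pointer bp for the
    // last two terms of each unrolled step.
    int main() {
        const int colNum = 8;
        float row[colNum] = {1, 2, 3, 4, 5, 6, 7, 8};
        const float * bp = row;                        // row pointer, as in the kernel

        float sum = 0.0f;
        if (colNum % 4 == 0) {
            for (int i = 0; i < colNum; i += 4)
                sum += bp[i] + bp[i + 1] + bp[i + 2] + bp[i + 3];
        }
        printf("%g\n", sum);                           // prints 36
        return 0;
    }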
source/test/TConcatenate.cpp

@@ -483,9 +483,9 @@ bool TestConcatenate4()
     delete sGPU1;
     delete sGPU2;
     delete tGPU;
-    delete[] sDimSize1;
-    delete[] sDimSize2;
-    delete[] tDimSize;
+    //delete[] sDimSize1;
+    //delete[] sDimSize2;
+    //delete[] tDimSize;

     return cpuTest && gpuTest;
 #else
...
source/test/TCopyIndexed.cpp

@@ -20,11 +20,12 @@
 */

 #include "TCopyIndexed.h"
+#include "../xc/Mycode.h"

 namespace nts { // namespace nts(NiuTrans.Tensor)

 /*
-case 1 copy indexed sub-tensors
+case 1: copy indexed sub-tensors
 In this case, (3, 2, 3) -> (3, 2, 2), dim = 2, indexSize = 2,
 srcIndex = [0, 2], tgtIndex = [0, 1], copyNum = 1.
 */

@@ -127,6 +128,213 @@ bool TestCopyIndexed1()
 #endif // USE_CUDA
 }
+
+/*
+case 2: copy indexed sub-tensors
+In this case, (3, 2, 3) -> (3, 2, 2), dim = 2, indexSize = 2,
+srcIndex = [0, 2], tgtIndex = [1, 0], copyNum = 1.
+*/
+bool TestCopyIndexed2()
+{
+    /* a input tensor of size (3, 2, 3) */
+    int sOrder = 3;
+    int * sDimSize = new int[sOrder];
+    sDimSize[0] = 3;
+    sDimSize[1] = 2;
+    sDimSize[2] = 3;
+
+    int sUnitNum = 1;
+    for (int i = 0; i < sOrder; i++)
+        sUnitNum *= sDimSize[i];
+
+    /* a output tensor of size (3, 2, 2) */
+    int tOrder = 3;
+    int * tDimSize = new int[tOrder];
+    tDimSize[0] = 3;
+    tDimSize[1] = 2;
+    tDimSize[2] = 2;
+
+    int tUnitNum = 1;
+    for (int i = 0; i < tOrder; i++)
+        tUnitNum *= tDimSize[i];
+
+    DTYPE sData[3][2][3] = { { {0.0F, -1.0F, 2.0F}, {2.0F, 1.0F, 3.0F} },
+                             { {1.0F, 2.0F, 4.0F}, {3.0F, 1.0F, 2.0F} },
+                             { {-1.0F, 3.0F, 2.0F}, {1.0F, -1.0F, 0.0F} } };
+    DTYPE answer[3][2][2] = { { {2.0F, 0.0F}, {3.0F, 2.0F} },
+                              { {4.0F, 1.0F}, {2.0F, 3.0F} },
+                              { {2.0F, -1.0F}, {0.0F, 1.0F} } };
+    int dim = 2;
+    int indexSize = 2;
+    int srcIndex[2] = {0, 2};
+    int tgtIndex[2] = {1, 0};
+    int copyNum = 1;
+
+    /* CPU test */
+    bool cpuTest = true;
+
+    /* create tensors */
+    XTensor * s = NewTensor(sOrder, sDimSize);
+    XTensor * t = NewTensor(tOrder, tDimSize);
+
+    /* initialize variables */
+    s->SetData(sData, sUnitNum);
+    t->SetZeroAll();
+
+    /* call CopyIndexed function */
+    CopyIndexed(s, t, dim, srcIndex, indexSize, tgtIndex, copyNum);
+
+    /* check results */
+    cpuTest = t->CheckData(answer, tUnitNum);
+
+#ifdef USE_CUDA
+    /* GPU test */
+    bool gpuTest = true;
+
+    /* create tensors */
+    XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * tGPU = NewTensor(sOrder, tDimSize, X_FLOAT, 1.0F, 0);
+
+    /* initialize variables */
+    sGPU->SetData(sData, sUnitNum);
+    tGPU->SetZeroAll();
+
+    /* call CopyIndexed function */
+    CopyIndexed(sGPU, tGPU, dim, srcIndex, indexSize, tgtIndex, copyNum);
+
+    /* check results */
+    gpuTest = tGPU->CheckData(answer, tUnitNum);
+
+    /* destroy variables */
+    delete s;
+    delete t;
+    delete sGPU;
+    delete tGPU;
+    delete[] sDimSize;
+    delete[] tDimSize;
+
+    return cpuTest && gpuTest;
+#else
+    /* destroy variables */
+    delete s;
+    delete t;
+    delete[] sDimSize;
+    delete[] tDimSize;
+
+    return cpuTest;
+#endif // USE_CUDA
+}

The added TestCopyIndexed3 repeats the same structure with the same sData and tensor shapes, but with dim = 2, indexSize = 1, srcIndex = {0}, tgtIndex = {0}, copyNum = 2 and

+    DTYPE answer[3][2][2] = { { {0.0F, -1.0F}, {2.0F, 1.0F} },
+                              { {1.0F, 2.0F}, {3.0F, 1.0F} },
+                              { {-1.0F, 3.0F}, {1.0F, -1.0F} } };

@@ -147,6 +355,24 @@ bool TestCopyIndexed()
     else
         XPRINT(0, stdout, ">> case 1 passed!\n");
+
+    /* case 2 test */
+    caseFlag = TestCopyIndexed2();
+    if (!caseFlag) {
+        returnFlag = false;
+        XPRINT(0, stdout, ">> case 2 failed!\n");
+    }
+    else
+        XPRINT(0, stdout, ">> case 2 passed!\n");
+
+    /* case 3 test */
+    caseFlag = TestCopyIndexed3();
+    if (!caseFlag) {
+        returnFlag = false;
+        XPRINT(0, stdout, ">> case 3 failed!\n");
+    }
+    else
+        XPRINT(0, stdout, ">> case 3 passed!\n");

     /* other cases test */
     /*
     TODO!!
...
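The two new cases pin down the CopyIndexed semantics the tests expect: along dimension dim, copyNum consecutive slices starting at each srcIndex[j] are copied into the slices starting at tgtIndex[j]. A minimal sketch of that rule on plain arrays, reproducing the first block of case 2's answer (an illustration of the expected behaviour, not the library routine itself):

    #include <cstdio>

    // Indexed copy along the last dimension of one (2, 3) slice of sData,
    // with srcIndex = {0, 2}, tgtIndex = {1, 0}, copyNum = 1 as in case 2.
    int main() {
        const int rows = 2, srcCols = 3, tgtCols = 2;
        float s[rows][srcCols] = { {0.0f, -1.0f, 2.0f},
                                   {2.0f,  1.0f, 3.0f} };
        float t[rows][tgtCols] = {};

        const int indexSize = 2, copyNum = 1;
        int srcIndex[indexSize] = {0, 2};
        int tgtIndex[indexSize] = {1, 0};

        for (int j = 0; j < indexSize; j++)
            for (int c = 0; c < copyNum; c++)
                for (int r = 0; r < rows; r++)
                    t[r][tgtIndex[j] + c] = s[r][srcIndex[j] + c];

        // expected: 2 0 / 3 2, matching the first block of `answer`
        printf("%g %g / %g %g\n", t[0][0], t[0][1], t[1][0], t[1][1]);
        return 0;
    }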
source/test/THardTanH.cpp

@@ -19,32 +19,28 @@
 * $Created by: Lin Ye (email: linye2015@outlook.com) 2018-06-20
 */

+#include "../XTensor.h"
 #include "THardTanH.h"

 namespace nts { // namespace nts(NiuTrans.Tensor)

-/* case 1: hard tanh function */
+/*
+case 1: test HardTanH function
+y =  1   if x > 1
+     x   if -1 <= x <= 1
+    -1   if x < -1
+*/
 bool TestHardTanH1()
 {
-    /* a x tensor of size (2, 3) */
-    int xOrder = 2;
-    int * xDimSize = new int[xOrder];
-    xDimSize[0] = 2;
-    xDimSize[1] = 3;
-
-    int xUnitNum = 1;
-    for (int i = 0; i < xOrder; i++)
-        xUnitNum *= xDimSize[i];
-
-    /* a y tensor of size (2, 3) */
-    int yOrder = 2;
-    int * yDimSize = new int[yOrder];
-    yDimSize[0] = 2;
-    yDimSize[1] = 3;
-
-    int yUnitNum = 1;
-    for (int i = 0; i < yOrder; i++)
-        yUnitNum *= yDimSize[i];
+    /* a tensor of size (2, 3) */
+    int order = 2;
+    int * dimSize = new int[order];
+    dimSize[0] = 2;
+    dimSize[1] = 3;
+
+    int unitNum = 1;
+    for (int i = 0; i < order; i++)
+        unitNum *= dimSize[i];

     DTYPE xData[2][3] = { {0.5F, -1.0F, 2.0F},
                           {3.5F, -4.5F, 1.0F} };

@@ -55,436 +51,168 @@ bool TestHardTanH1()
The remainder of TestHardTanH1 (CPU and GPU branches) is updated the same way: the separate
xOrder/xDimSize/xUnitNum and yOrder/yDimSize/yUnitNum become the single order/dimSize/unitNum
in NewTensor, SetData and CheckData, and the two delete[] statements collapse into one:

-    XTensor * x = NewTensor(xOrder, xDimSize);
-    XTensor * y = NewTensor(yOrder, yDimSize);
+    XTensor * x = NewTensor(order, dimSize);
+    XTensor * y = NewTensor(order, dimSize);
...
-    delete[] xDimSize;
-    delete[] yDimSize;
+    delete[] dimSize;

The backward tests are consolidated. The old TestHardTanH2 (lossName=CROSSENTROPY),
TestHardTanH3 (lossName=SQUAREDERROR) and TestHardTanH4 (lossName=ONEHOTERROR), each with
separate x/y/gold/dedy/dedx dimension arrays and comma-chained delete statements, are replaced
by a single new TestHardTanH2:

+/*
+case 2: test backward computation of HardTanH function.
+dE/dx = dE/dy * dy/dx
+hard tanh: y =  1   if x > 1
+                x   if -1 <= x <= 1
+               -1   if x < -1
+and dy/dx = 1 if -1 <= x <= 1, 0 otherwise
+In this case, lossName=SQUAREDERROR.
+*/

The new case uses the single order/dimSize/unitNum, zeroes y, dedy and dedx, calls
HardTanH(x, y) before HardTanHBackward(gold, y, x, dedy, dedx, SQUAREDERROR) (and the
same with the *GPU tensors), checks y, dedx and dedy against yAnswer, dedxAnswer and
dedyAnswer with a 1e-4F tolerance, and frees every tensor with its own delete statement
plus a single delete[] dimSize.

@@ -521,26 +249,6 @@ bool TestHardTanH()
     else
         XPRINT(0, stdout, ">> case 2 passed!\n");

-    /* case 3 test */
-    caseFlag = TestHardTanH3();
-    if (!caseFlag) {
-        returnFlag = false;
-        XPRINT(0, stdout, ">> case 3 failed!\n");
-    }
-    else
-        XPRINT(0, stdout, ">> case 3 passed!\n");
-
-    /* case 4 test */
-    caseFlag = TestHardTanH4();
-    if (!caseFlag) {
-        returnFlag = false;
-        XPRINT(0, stdout, ">> case 4 failed!\n");
-    }
-    else
-        XPRINT(0, stdout, ">> case 4 passed!\n");

     /* other cases test */
     /*
     TODO!!
...
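The rewritten comments spell out the forward rule and its derivative, which is what the consolidated backward case checks. A small self-contained sketch of both, using the xData values from the test and made-up upstream gradients (plain C++, not the library's HardTanH/HardTanHBackward):

    #include <cstdio>

    // Hard tanh forward rule and its derivative, as quoted in the new comments:
    //   y = 1 if x > 1;  y = x if -1 <= x <= 1;  y = -1 if x < -1
    //   dy/dx = 1 on [-1, 1], 0 otherwise, so dE/dx = dE/dy inside the band.
    float hardTanH(float x)     { return x > 1.0f ? 1.0f : (x < -1.0f ? -1.0f : x); }
    float hardTanHGrad(float x) { return (x >= -1.0f && x <= 1.0f) ? 1.0f : 0.0f; }

    int main() {
        const float x[6]    = {0.5f, -1.0f, 2.0f, 3.5f, -4.5f, 1.0f}; // xData from the test
        const float dedy[6] = {-2.0f, 1.0f, -1.0f, -1.0f, 1.0f, -1.0f}; // hypothetical dE/dy

        for (int i = 0; i < 6; i++)
            printf("y=%g  dE/dx=%g\n", hardTanH(x[i]), dedy[i] * hardTanHGrad(x[i]));
        return 0;
    }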
source/test/TIdentity.cpp

@@ -30,15 +30,15 @@ Identity function: y = x
 */
 bool TestIdentity1()
 {
-    /* a input tensor of size (2, 3) */
-    int sOrder = 2;
-    int * sDimSize = new int[sOrder];
-    sDimSize[0] = 2;
-    sDimSize[1] = 3;
-
-    int sUnitNum = 1;
-    for (int i = 0; i < sOrder; i++)
-        sUnitNum *= sDimSize[i];
+    /* a tensor of size (2, 3) */
+    int order = 2;
+    int * dimSize = new int[order];
+    dimSize[0] = 2;
+    dimSize[1] = 3;
+
+    int unitNum = 1;
+    for (int i = 0; i < order; i++)
+        unitNum *= dimSize[i];

     DTYPE xData[2][3] = { {0.0F, 1.0F, 2.0F},
                           {0.5F, 0.7F, 1.4F} };

@@ -49,47 +49,50 @@ bool TestIdentity1()
The CPU/GPU bodies of TestIdentity1 switch to the new names (NewTensor(order, dimSize),
SetData(xData, unitNum), CheckData(answer, unitNum)), and the cleanup is split into one
statement per object:

-    delete x, y;
-    delete xGPU, yGPU;
-    delete[] sDimSize;
+    delete x;
+    delete y;
+    delete xGPU;
+    delete yGPU;
+    delete[] dimSize;

@@ -98,35 +101,39 @@ bool TestIdentity1()
 /*
 case 2: test IdentityBackward function.
 IdentityBackward function: dE/dx = dE/dy * dy/dx = dE/dy
+In this case, lossName=CROSSENTROPY.
 */
 bool TestIdentity2()
 {
-    int sOrder = 2;
-    int * sDimSize = new int[sOrder];
-    sDimSize[0] = 1;
-    sDimSize[1] = 3;
-
-    int sUnitNum = 1;
-    for (int i = 0; i < sOrder; i++)
-        sUnitNum *= sDimSize[i];
-
-    DTYPE xData[1][3] = { {1.0F, 1.0F, 2.0F} };
-    DTYPE gData[1][3] = { {0.0F, 0.0F, 1.0F} };
+    /* a tensor of size (2, 3) */
+    int order = 2;
+    int * dimSize = new int[order];
+    dimSize[0] = 1;
+    dimSize[1] = 3;
+
+    int unitNum = 1;
+    for (int i = 0; i < order; i++)
+        unitNum *= dimSize[i];
+
+    DTYPE xData[3] = {1.0F, 1.0F, 2.0F};
+    DTYPE gData[3] = {0.0F, 0.0F, 1.0F};
+    DTYPE yAnswer[3] = {1.0F, 1.0F, 2.0F};
+    DTYPE dedyAnswer[3] = {0.0F, 0.0F, -0.5F};
     DTYPE dedxAnswer[3] = {0.0F, 0.0F, -0.5F};

@@ -138,22 +145,24 @@ bool TestIdentity2()
The tensors are created with NewTensor(order, dimSize) and the result check now covers the
forward output and both gradients:

-    cpuTest = dedx->CheckData(dedxAnswer, sUnitNum, 1e-4F);
+    cpuTest = y->CheckData(yAnswer, unitNum, 1e-4F)
+              && dedx->CheckData(dedxAnswer, unitNum, 1e-4F)
+              && dedy->CheckData(dedyAnswer, unitNum, 1e-4F);

@@ -165,7 +174,9 @@ bool TestIdentity2()
-    gpuTest = dedxGPU->CheckData(dedxAnswer, sUnitNum, 1e-4F);
+    gpuTest = yGPU->CheckData(yAnswer, unitNum, 1e-4F)
+              && dedxGPU->CheckData(dedxAnswer, unitNum, 1e-4F)
+              && dedyGPU->CheckData(dedyAnswer, unitNum, 1e-4F);

@@ -178,7 +189,7 @@ and @@ -188,7 +199,7 @@ bool TestIdentity2()
-    delete[] sDimSize;
+    delete[] dimSize;
...
source/test/TLogSoftmax.cpp

@@ -30,15 +30,15 @@ LogSoftmax function: y = log(e^x / \sum_{i} e^{x_i})
 */
 bool TestLogSoftmax1()
 {
-    /* a input tensor of size (2, 3) */
-    int sOrder = 2;
-    int * sDimSize = new int[sOrder];
-    sDimSize[0] = 2;
-    sDimSize[1] = 3;
-
-    int sUnitNum = 1;
-    for (int i = 0; i < sOrder; i++)
-        sUnitNum *= sDimSize[i];
+    /* a tensor of size (2, 3) */
+    int order = 2;
+    int * dimSize = new int[order];
+    dimSize[0] = 2;
+    dimSize[1] = 3;
+
+    int unitNum = 1;
+    for (int i = 0; i < order; i++)
+        unitNum *= dimSize[i];

     DTYPE xData[2][3] = { {0.0F, 1.0F, 2.0F},
                           {0.5F, 0.7F, 1.4F} };

@@ -49,50 +49,50 @@ bool TestLogSoftmax1()
The body of TestLogSoftmax1 (CPU and GPU) switches to the new names throughout:
NewTensor(order, dimSize), SetData(xData, unitNum), CheckData(answer, unitNum, 1e-4F) and
delete[] dimSize replace their sOrder/sDimSize/sUnitNum counterparts.

@@ -102,37 +102,38 @@ bool TestLogSoftmax1()
 /*
 case 2: test LogSoftmaxBackward function.
 dE/dx = dE/dy * dy/dx
 log softmax: y_i = log(e^{x_i} / \sum_{k} e^{x_k})
+In this case, LossName=CROSSENTROPY.
 */
 bool TestLogSoftmax2()
 {
-    /* a input tensor of size (3) */
-    int sOrder = 2;
-    int * sDimSize = new int[sOrder];
-    sDimSize[0] = 1;
-    sDimSize[1] = 3;
+    /* a tensor of size (1, 3) */
+    int order = 2;
+    int * dimSize = new int[order];
+    dimSize[0] = 1;
+    dimSize[1] = 3;
...
-    DTYPE xData[1][3] = { {0.0F, 1.0F, 2.0F} };
+    DTYPE xData[1][3] = {0.0F, 1.0F, 2.0F};
     DTYPE gData[1][3] = {0.5F, 0.8F, 1.5F};
     DTYPE yAnswer[1][3] = {-2.4076F, -1.4076F, -0.4076F};
-    DTYPE dedxAnswer[1][3] = {-0.409969F, -0.555272F, -0.834759F};
+    DTYPE dedxAnswer[1][3] = {-0.4100F, -0.5553F, -0.8348F};

@@ -141,25 +142,26 @@ bool TestLogSoftmax2()
-    LogSoftmaxBackward(g, y, x, dedy, dedx, 0, CROSSENTROPY);
-    cpuTest = y->CheckData(yAnswer, sUnitNum, 1e-4F) && dedx->CheckData(dedxAnswer, sUnitNum, 1e-4F);
+    LogSoftmaxBackward(g, y, x, dedy, dedx, 1, CROSSENTROPY);
+    cpuTest = y->CheckData(yAnswer, unitNum, 1e-4F) && dedx->CheckData(dedxAnswer, unitNum, 1e-4F);

@@ -168,10 +170,10 @@ bool TestLogSoftmax2()
-    LogSoftmaxBackward(gGPU, yGPU, xGPU, dedyGPU, dedxGPU, 0, CROSSENTROPY);
-    gpuTest = yGPU->CheckData(yAnswer, sUnitNum, 1e-4F) && dedxGPU->CheckData(dedxAnswer, sUnitNum, 1e-4F);
+    LogSoftmaxBackward(gGPU, yGPU, xGPU, dedyGPU, dedxGPU, 1, CROSSENTROPY);
+    gpuTest = yGPU->CheckData(yAnswer, unitNum, 1e-4F) && dedxGPU->CheckData(dedxAnswer, unitNum, 1e-4F);

@@ -184,7 +186,7 @@ and @@ -194,7 +196,7 @@ bool TestLogSoftmax2()
-    delete[] sDimSize;
+    delete[] dimSize;

@@ -204,37 +206,38 @@ bool TestLogSoftmax2()
TestLogSoftmax3 receives the same renames, brace and rounding changes as case 2, and its
comment gains "In this case, LossName=SQUAREDERROR". Its backward calls and GPU check change:

@@ -243,25 +246,26 @@ bool TestLogSoftmax3()
-    LogSoftmaxBackward(g, y, x, dedy, dedx, 1, CROSSENTROPY);
+    LogSoftmaxBackward(g, y, x, dedy, dedx, 1, SQUAREDERROR);

@@ -270,10 +274,11 @@ bool TestLogSoftmax3()
-    LogSoftmaxBackward(gGPU, yGPU, xGPU, dedyGPU, dedxGPU, 1, CROSSENTROPY);
-    gpuTest = yGPU->CheckData(yAnswer, sUnitNum, 1e-4F) && dedxGPU->CheckData(dedxAnswer, sUnitNum, 1e-4F);
+    LogSoftmaxBackward(gGPU, yGPU, xGPU, dedyGPU, dedxGPU, 1, SQUAREDERROR);
+    gpuTest = yGPU->CheckData(yAnswer, unitNum, 1e-4F) && dedxGPU->CheckData(dedxAnswer, unitNum, 1e-3F);

@@ -286,7 +291,7 @@ and @@ -296,13 +301,12 @@ bool TestLogSoftmax3()
-    delete[] sDimSize;
+    delete[] dimSize;

@@ -311,7 +315,7 @@ bool TestLogSoftmax3()
 /* test for LogSoftmax Function */
 bool TestLogSoftmax()
 {
-    XPRINT(0, stdout, "[TEST LogSoftmax] test logsoftmax function and its backward computation\n");
+    XPRINT(0, stdout, "[TEST LogSoftmax] logsoftmax function and its backward computation\n");
     bool returnFlag = true, caseFlag = true;

     /* case 1 test */
...
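For reference, the forward values these tests check follow directly from the formula in the comments, y_i = log(e^{x_i} / \sum_{k} e^{x_k}); for xData = {0, 1, 2} it yields the yAnswer entries -2.4076, -1.4076, -0.4076. A tiny self-contained check (plain C++):

    #include <cstdio>
    #include <cmath>

    // Recompute log softmax for the test input {0, 1, 2} and print the
    // values the yAnswer array expects.
    int main() {
        const int n = 3;
        double x[n] = {0.0, 1.0, 2.0};

        double sum = 0.0;
        for (int i = 0; i < n; i++)
            sum += exp(x[i]);
        for (int i = 0; i < n; i++)
            printf("%.4f ", log(exp(x[i]) / sum));   // -2.4076 -1.4076 -0.4076
        printf("\n");
        return 0;
    }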
source/test/TLoss.cpp

@@ -20,16 +20,15 @@
 */

 #include "../core/math/ScaleAndShift.h"
-#include "../function/Loss.h"
 #include "TLoss.h"

 namespace nts { // namespace nts(NiuTrans.Tensor)

 /*
-case 1: test LossCompute function
+case 1: test LossCompute function.
 In this case, Loss function name = SQUAREDERROR.
 loss = sum_{i} 0.5*(t_i - y_i)^2,
-where t_i is the gold standard and y_i is the model output
+where t_i is the gold standard and y_i is the model output.
 */
 bool TestLoss1()
 {

@@ -103,10 +102,10 @@ bool TestLoss1()
 /*
-case 2: test LossCompute function
+case 2: test LossCompute function.
 In this case, Loss function name = CROSSENTROPY.
 loss = sum_{i} (-t_i * log(y_i))
-where t_i is the gold standard and y_i is the model output
+where t_i is the gold standard and y_i is the model output.
 */
 bool TestLoss2()
 {

@@ -180,10 +179,10 @@ bool TestLoss2()
 /*
-case 3: test LossCompute function
+case 3: test LossCompute function.
 In this case, Loss function name = ONEHOTERROR.
 loss = sum_{i} e_i
-where e_i = 0.5*(t_i - y_i)^2 if t_i = 1, e_i = 0 otherwise
+where e_i = 0.5*(t_i - y_i)^2 if t_i = 1, e_i = 0 otherwise.
 */
 bool TestLoss3()
 {
...
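The comments above define the two main losses the tests exercise: squared error, loss = sum_i 0.5*(t_i - y_i)^2, and cross entropy, loss = sum_i (-t_i * log(y_i)), with t the gold standard and y the model output. A minimal sketch that evaluates both on made-up vectors (plain C++, not the library's LossCompute):

    #include <cstdio>
    #include <cmath>

    // Evaluate the squared-error and cross-entropy definitions quoted in the
    // test comments on a small hypothetical (t, y) pair.
    int main() {
        const int n = 3;
        double t[n] = {0.0, 0.0, 1.0};   // gold standard (made up)
        double y[n] = {0.2, 0.3, 0.5};   // model output (made up)

        double se = 0.0, ce = 0.0;
        for (int i = 0; i < n; i++) {
            se += 0.5 * (t[i] - y[i]) * (t[i] - y[i]);
            ce += -t[i] * log(y[i]);
        }
        printf("squared error = %f, cross entropy = %f\n", se, ce);
        return 0;
    }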
source/test/TMatrixMulBatched.cpp

@@ -19,6 +19,7 @@
 * $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-15
 */

+#include "../XTensor.h"
 #include "TMatrixMulBatched.h"

 namespace nts { // namespace nts(NiuTrans.Tensor)
...
source/test/TRectify.cpp
查看文件 @
003def3d
...
@@ -29,25 +29,15 @@ In this case, y = max(0, x)
...
@@ -29,25 +29,15 @@ In this case, y = max(0, x)
*/
*/
bool
TestRectify1
()
bool
TestRectify1
()
{
{
/* a x tensor of size (2, 3) */
/* a tensor of size (2, 3) */
int
xOrder
=
2
;
int
order
=
2
;
int
*
xDimSize
=
new
int
[
xOrder
];
int
*
dimSize
=
new
int
[
order
];
xDimSize
[
0
]
=
2
;
dimSize
[
0
]
=
2
;
xDimSize
[
1
]
=
3
;
dimSize
[
1
]
=
3
;
int
xUnitNum
=
1
;
int
unitNum
=
1
;
for
(
int
i
=
0
;
i
<
xOrder
;
i
++
)
for
(
int
i
=
0
;
i
<
order
;
i
++
)
xUnitNum
*=
xDimSize
[
i
];
unitNum
*=
dimSize
[
i
];
/* a y tensor of size (2, 3) */
int
yOrder
=
2
;
int
*
yDimSize
=
new
int
[
yOrder
];
yDimSize
[
0
]
=
2
;
yDimSize
[
1
]
=
3
;
int
yUnitNum
=
1
;
for
(
int
i
=
0
;
i
<
yOrder
;
i
++
)
yUnitNum
*=
yDimSize
[
i
];
DTYPE
xData
[
2
][
3
]
=
{
{
0.0
F
,
-
1.0
F
,
2.0
F
},
DTYPE
xData
[
2
][
3
]
=
{
{
0.0
F
,
-
1.0
F
,
2.0
F
},
{
3.0
F
,
-
4.0
F
,
-
5.0
F
}
};
{
3.0
F
,
-
4.0
F
,
-
5.0
F
}
};
...
@@ -58,52 +48,50 @@ bool TestRectify1()
...
@@ -58,52 +48,50 @@ bool TestRectify1()
bool
cpuTest
=
true
;
bool
cpuTest
=
true
;
/* create tensors */
/* create tensors */
XTensor
*
x
=
NewTensor
(
xOrder
,
xD
imSize
);
XTensor
*
x
=
NewTensor
(
order
,
d
imSize
);
XTensor
*
y
=
NewTensor
(
yOrder
,
yD
imSize
);
XTensor
*
y
=
NewTensor
(
order
,
d
imSize
);
/* initialize variables */
/* initialize variables */
x
->
SetData
(
xData
,
xU
nitNum
);
x
->
SetData
(
xData
,
u
nitNum
);
y
->
SetZeroAll
();
y
->
SetZeroAll
();
/* call Rectify function */
/* call Rectify function */
Rectify
(
x
,
y
);
Rectify
(
x
,
y
);
/* check results */
/* check results */
cpuTest
=
y
->
CheckData
(
answer
,
yU
nitNum
);
cpuTest
=
y
->
CheckData
(
answer
,
u
nitNum
);
#ifdef USE_CUDA
#ifdef USE_CUDA
/* GPU test */
/* GPU test */
bool
gpuTest
=
true
;
bool
gpuTest
=
true
;
/* create tensor */
/* create tensor */
XTensor
*
xGPU
=
NewTensor
(
xOrder
,
xD
imSize
,
X_FLOAT
,
1.0
F
,
0
);
XTensor
*
xGPU
=
NewTensor
(
order
,
d
imSize
,
X_FLOAT
,
1.0
F
,
0
);
XTensor
*
yGPU
=
NewTensor
(
yOrder
,
yD
imSize
,
X_FLOAT
,
1.0
F
,
0
);
XTensor
*
yGPU
=
NewTensor
(
order
,
d
imSize
,
X_FLOAT
,
1.0
F
,
0
);
/* Initialize variables */
/* Initialize variables */
xGPU
->
SetData
(
xData
,
xU
nitNum
);
xGPU
->
SetData
(
xData
,
u
nitNum
);
yGPU
->
SetZeroAll
();
yGPU
->
SetZeroAll
();
/* call Rectify function */
/* call Rectify function */
Rectify
(
xGPU
,
yGPU
);
Rectify
(
xGPU
,
yGPU
);
/* check results */
/* check results */
gpuTest
=
yGPU
->
CheckData
(
answer
,
yU
nitNum
);
gpuTest
=
yGPU
->
CheckData
(
answer
,
u
nitNum
);
/* destroy variables */
/* destroy variables */
delete
x
;
delete
x
;
delete
y
;
delete
y
;
delete
xGPU
;
delete
xGPU
;
delete
yGPU
;
delete
yGPU
;
delete
[]
xDimSize
;
delete
[]
dimSize
;
delete
[]
yDimSize
;
return
cpuTest
&&
gpuTest
;
return
cpuTest
&&
gpuTest
;
#else
#else
/* destroy variables */
/* destroy variables */
delete
x
;
delete
x
;
delete
y
;
delete
y
;
delete
[]
xDimSize
;
delete
[]
dimSize
;
delete
[]
yDimSize
;
return
cpuTest
;
return
cpuTest
;
#endif // USE_CUDA
#endif // USE_CUDA
...
@@ -117,73 +105,83 @@ In this case, lossName=CROSSENTROPY.
...
@@ -117,73 +105,83 @@ In this case, lossName=CROSSENTROPY.
*/
*/
bool TestRectify2()
bool TestRectify2()
{
{
/* a x tensor of size (2, 3) */
/* a tensor of size (2, 3) */
int xOrder = 2;
int order = 2;
int * xDimSize = new int[xOrder];
int * dimSize = new int[order];
xDimSize[0] = 2;
dimSize[0] = 2;
xDimSize[1] = 3;
dimSize[1] = 3;
int xUnitNum = 1;
int unitNum = 1;
for (int i = 0; i < xOrder; i++)
for (int i = 0; i < order; i++)
xUnitNum *= xDimSize[i];
unitNum *= dimSize[i];
DTYPE xData[2][3] = { {1.0F, 1.0F, 2.0F},
DTYPE xData[2][3] = { {1.0F, 1.0F, 2.0F},
{2.0F, 4.0F, 5.0F} };
{2.0F, 4.0F, 5.0F} };
DTYPE yData[2][3] = { {1.0F, 1.0F, 2.0F}, {2.0F, 4.0F, 5.0F} };
DTYPE goldData[2][3] = { {1.0F, 1.0F, 1.0F},
DTYPE goldData[2][3] = { {1.0F, 1.0F, 1.0F},
{1.0F, 1.0F, 1.0F} };
{1.0F, 1.0F, 1.0F} };
DTYPE dedyData[2][3] = { {-1.0F, -1.0F, -0.5F},
DTYPE yAnswer[2][3] = { {1.0F, 1.0F, 2.0F}, {2.0F, 4.0F, 5.0F} };
DTYPE dedyAnswer[2][3] = { {-1.0F, -1.0F, -0.5F}, {-0.5F, -0.25F, -0.2F} };
{-0.5F, -0.25F, -0.2F} };
DTYPE answer[2][3] = { {-1.0F, -1.0F, -0.5F},
DTYPE dedxAnswer[2][3] = { {-1.0F, -1.0F, -0.5F},
{-0.5F, -0.25F, -0.2F} };
{-0.5F, -0.25F, -0.2F} };
/* CPU test */
/* CPU test */
bool cpuTest = true;
bool cpuTest = true;
/* create tensors */
/* create tensors */
XTensor * x = NewTensor(xOrder, xDimSize);
XTensor * x = NewTensor(order, dimSize);
XTensor * y = NewTensor(xOrder, xDimSize);
XTensor * y = NewTensor(order, dimSize);
XTensor * gold = NewTensor(xOrder, xDimSize);
XTensor * gold = NewTensor(order, dimSize);
XTensor * dedy = NewTensor(xOrder, xDimSize);
XTensor * dedy = NewTensor(order, dimSize);
XTensor * dedx = NewTensor(xOrder, xDimSize);
XTensor * dedx = NewTensor(order, dimSize);
/* initialize variables */
/* initialize variables */
x->SetData(xData, xUnitNum);
x->SetData(xData, unitNum);
y->SetData(yData, xUnitNum);
gold->SetData(goldData, unitNum);
gold->SetData(goldData, xUnitNum);
y->SetZeroAll();
dedy->SetData(dedyData, xUnitNum);
dedy->SetZeroAll();
dedx->SetZeroAll();
dedx->SetZeroAll();
/* call Rectify function */
Rectify(x, y);
/* call RectifyBackward function */
/* call RectifyBackward function */
RectifyBackward(gold, y, x, dedy, dedx, NOLOSS);
RectifyBackward(gold, y, x, dedy, dedx, CROSSENTROPY);
/* check results */
/* check results */
cpuTest = dedx->CheckData(answer, xUnitNum);
cpuTest = y->CheckData(yAnswer, unitNum, 1e-4F) && dedx->CheckData(dedxAnswer, unitNum, 1e-4F) && dedy->CheckData(dedyAnswer, unitNum, 1e-4F);
#ifdef USE_CUDA
#ifdef USE_CUDA
/* GPU test */
/* GPU test */
bool gpuTest = true;
bool gpuTest = true;
/* create tensors */
/* create tensors */
XTensor * xGPU = NewTensor(xOrder, xDimSize, X_FLOAT, 1.0F, 0);
XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * yGPU = NewTensor(xOrder, xDimSize, X_FLOAT, 1.0F, 0);
XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * goldGPU = NewTensor(xOrder, xDimSize, X_FLOAT, 1.0F, 0);
XTensor * goldGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * dedyGPU = NewTensor(xOrder, xDimSize, X_FLOAT, 1.0F, 0);
XTensor * dedyGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * dedxGPU = NewTensor(xOrder, xDimSize, X_FLOAT, 1.0F, 0);
XTensor * dedxGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
/* initialize variables */
xGPU->SetData(xData, xUnitNum);
xGPU->SetData(xData, unitNum);
yGPU->SetData(yData, xUnitNum);
goldGPU->SetData(goldData, unitNum);
goldGPU->SetData(goldData, xUnitNum);
yGPU->SetZeroAll();
dedyGPU->SetData(dedyData, xUnitNum);
dedyGPU->SetZeroAll();
dedxGPU->SetZeroAll();
dedxGPU->SetZeroAll();
/* call Rectify function */
Rectify(xGPU, yGPU);
/* call rectifybackward function */
/* call rectifybackward function */
RectifyBackward(goldGPU, yGPU, xGPU, dedyGPU, dedxGPU, NOLOSS);
RectifyBackward(goldGPU, yGPU, xGPU, dedyGPU, dedxGPU, CROSSENTROPY);
/* check results */
/* check results */
gpuTest = dedxGPU->CheckData(answer, xUnitNum);
gpuTest = yGPU->CheckData(yAnswer, unitNum, 1e-4F) && dedxGPU->CheckData(dedxAnswer, unitNum, 1e-4F) && dedyGPU->CheckData(dedyAnswer, unitNum, 1e-4F);
/* destroy variables */
/* destroy variables */
delete x;
delete x;
...
@@ -196,7 +194,7 @@ bool TestRectify2()
...
@@ -196,7 +194,7 @@ bool TestRectify2()
delete dedyGPU;
delete dedyGPU;
delete dedxGPU;
delete dedxGPU;
delete goldGPU;
delete goldGPU;
delete[] xDimSize;
delete[] dimSize;
return cpuTest && gpuTest;
return cpuTest && gpuTest;
#else
#else
...
@@ -206,7 +204,7 @@ bool TestRectify2()
...
@@ -206,7 +204,7 @@ bool TestRectify2()
delete dedy;
delete dedy;
delete dedx;
delete dedx;
delete gold;
delete gold;
delete[] xDimSize;
delete[] dimSize;
return cpuTest;
return cpuTest;
#endif // USE_CUDA
#endif // USE_CUDA
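The new yAnswer, dedyAnswer and dedxAnswer values in TestRectify2 follow from the chain rule once the loss name is CROSSENTROPY. Below is a minimal standalone sketch, under the assumption that the cross-entropy loss here is E = -sum(gold * log y), so dE/dy = -gold/y, and that the rectifier passes the gradient through unchanged where x > 0; it reproduces the three answer arrays above:

// rectify_backward_check.cpp -- standalone sketch for the TestRectify2 numbers
#include <cstdio>

int main()
{
    const float xData[2][3]    = { {1.0F, 1.0F, 2.0F}, {2.0F, 4.0F, 5.0F} };
    const float goldData[2][3] = { {1.0F, 1.0F, 1.0F}, {1.0F, 1.0F, 1.0F} };
    for (int i = 0; i < 2; i++) {
        for (int j = 0; j < 3; j++) {
            float y    = xData[i][j] > 0.0F ? xData[i][j] : 0.0F;  // forward: yAnswer
            float dedy = -goldData[i][j] / y;                      // assumed dE/dy = -gold/y: dedyAnswer
            float dedx = xData[i][j] > 0.0F ? dedy : 0.0F;         // gradient passes through for x > 0: dedxAnswer
            std::printf("y=%.2f dedy=%.2f dedx=%.2f\n", y, dedy, dedx);
        }
    }
    return 0;
}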
...
@@ -220,7 +218,7 @@ TODO!!
...
@@ -220,7 +218,7 @@ TODO!!
/* test for Rectify Function */
/* test for Rectify Function */
bool TestRectify()
bool TestRectify()
{
{
XPRINT(0, stdout, "[TEST RECTIFY] test rectify and its backward computation\n");
XPRINT(0, stdout, "[TEST RECTIFY] rectify function and its backward computation\n");
bool returnFlag = true, caseFlag = true;
bool returnFlag = true, caseFlag = true;
/* case 1 test */
/* case 1 test */
...
...
source/test/TSetAscendingOrder.cpp
View file @
003def3d
...
@@ -23,8 +23,7 @@
...
@@ -23,8 +23,7 @@
namespace nts { // namespace nts(NiuTrans.Tensor)
namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1: set the cell to the ascending order along a given dimension.
/* case 1: set the cell to the ascending order along a given dimension. */
*/
bool TestSetAscendingOrder1()
bool TestSetAscendingOrder1()
{
{
/* a input tensor of size (2, 4) */
/* a input tensor of size (2, 4) */
...
@@ -50,7 +49,6 @@ bool TestSetAscendingOrder1()
...
@@ -50,7 +49,6 @@ bool TestSetAscendingOrder1()
s->SetZeroAll();
s->SetZeroAll();
/* call SetAscendingOrder function */
/* call SetAscendingOrder function */
s->SetAscendingOrder(1);
s->SetAscendingOrder(1);
/* check results */
/* check results */
...
...
source/test/TSetData.cpp
View file @
003def3d
...
@@ -23,7 +23,10 @@
...
@@ -23,7 +23,10 @@
namespace nts { // namespace nts(NiuTrans.Tensor)
namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1: set the cell to the ascending order along a given dimension. */
/*
case 1: test SetDataRand function.
set the tensor items by a uniform distribution in range [lower, upper].
*/
bool TestSetData1()
bool TestSetData1()
{
{
/* a input tensor of size (2, 4) */
/* a input tensor of size (2, 4) */
...
@@ -44,7 +47,7 @@ bool TestSetData1()
...
@@ -44,7 +47,7 @@ bool TestSetData1()
/* create tensors */
/* create tensors */
XTensor * s = NewTensor(sOrder, sDimSize);
XTensor * s = NewTensor(sOrder, sDimSize);
/* call SetData function */
/* call SetDataRand function */
s->SetDataRand(0.0, 1.0);
s->SetDataRand(0.0, 1.0);
/* check results */
/* check results */
...
...
source/test/TSigmoid.cpp
View file @
003def3d
...
@@ -25,102 +25,71 @@
...
@@ -25,102 +25,71 @@
namespace nts { // namespace nts(NiuTrans.Tensor)
namespace nts { // namespace nts(NiuTrans.Tensor)
/*
/*
case 1: test Sigmoid function and SigmoidBackward function.
case 1: test Sigmoid function.
sigmoid function: y = 1/(1+exp(-x))
sigmoid function: y = 1/(1+exp(-x))
backward computation: dE/ds = dE/dy * dy/dx
*/
*/
bool TestSigmoid1()
bool TestSigmoid1()
{
{
/* a input tensor of size (3) */
/* a input tensor of size (3) */
int sOrder = 1;
int order = 1;
int * sDimSize = new int[sOrder];
int * dimSize = new int[order];
sDimSize[0] = 3;
dimSize[0] = 3;
int sUnitNum = 1;
int unitNum = 1;
for (int i = 0; i < sOrder; i++)
for (int i = 0; i < order; i++)
sUnitNum *= sDimSize[i];
unitNum *= dimSize[i];
DTYPE xData[3] = {0.0F, 1.0F, 2.0F};
DTYPE xData[3] = {0.0F, 1.0F, 2.0F};
DTYPE gData[3] = {0.4F, 0.8F, 1.0F};
DTYPE answer[3] = {0.5F, 0.7311F, 0.8808F};
DTYPE dedyData[3] = {-0.8F, -1.094F, -1.135F};
DTYPE yAnswer[3] = {0.5F, 0.731F, 0.881F};
DTYPE dedxAnswer[3] = {-0.2F, -0.215F, -0.119F};
/* CPU test */
/* CPU test */
bool cpuTest = true;
bool cpuTest = true;
/* create tensors */
/* create tensors */
XTensor * x = NewTensor(sOrder, sDimSize);
XTensor * x = NewTensor(order, dimSize);
XTensor * y = NewTensor(sOrder, sDimSize);
XTensor * y = NewTensor(order, dimSize);
XTensor * g = NewTensor(sOrder, sDimSize);
XTensor * dedy = NewTensor(sOrder, sDimSize);
XTensor * dedx = NewTensor(sOrder, sDimSize);
/* initialize variables */
/* initialize variables */
x->SetData(xData, sUnitNum);
x->SetData(xData, unitNum);
g->SetData(gData, sUnitNum);
dedy->SetData(dedyData, sUnitNum);
y->SetZeroAll();
y->SetZeroAll();
dedx->SetZeroAll();
/* call Sigmoid function */
/* call Sigmoid function */
Sigmoid(x, y);
Sigmoid(x, y);
/* call SigmoidBackward function */
SigmoidBackward(g, y, x, dedy, dedx, NOLOSS);
/* check result */
/* check result */
cpuTest = y->CheckData(yAnswer, sUnitNum, 0.001F) && dedx->CheckData(dedxAnswer, sUnitNum, 0.001F);
cpuTest = y->CheckData(answer, unitNum, 1e-4F);
#ifdef USE_CUDA
#ifdef USE_CUDA
/* GPU test */
/* GPU test */
bool gpuTest = true;
bool gpuTest = true;
/* create tensors */
/* create tensors */
XTensor * xGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * yGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * gGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * dedyGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * dedxGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
/* initialize variables */
xGPU->SetData(xData, sUnitNum);
xGPU->SetData(xData, unitNum);
gGPU->SetData(gData, sUnitNum);
dedyGPU->SetData(dedyData, sUnitNum);
yGPU->SetZeroAll();
yGPU->SetZeroAll();
dedxGPU->SetZeroAll();
/* call Sigmoid function */
/* call Sigmoid function */
Sigmoid(xGPU, yGPU);
Sigmoid(xGPU, yGPU);
/* call SigmoidBackward function */
SigmoidBackward(gGPU, yGPU, xGPU, dedyGPU, dedxGPU, NOLOSS);
/* check result */
/* check result */
gpuTest = yGPU->CheckData(yAnswer, sUnitNum, 0.001F) && dedxGPU->CheckData(dedxAnswer, sUnitNum, 0.001F);
gpuTest = yGPU->CheckData(answer, unitNum, 1e-4F);
/* destroy variables */
/* destroy variables */
delete x;
delete x;
delete y;
delete y;
delete g;
delete dedx;
delete dedy;
delete xGPU;
delete xGPU;
delete yGPU;
delete yGPU;
delete gGPU;
delete[] dimSize;
delete dedxGPU;
delete dedyGPU;
delete[] sDimSize;
return cpuTest && gpuTest;
return cpuTest && gpuTest;
#else
#else
/* destroy variables */
/* destroy variables */
delete x;
delete x;
delete y;
delete y;
delete g;
delete[] dimSize;
delete dedx;
delete dedy;
delete[] sDimSize;
return cpuTest;
return cpuTest;
#endif // USE_CUDA
#endif // USE_CUDA
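The new answer values in TestSigmoid1 are simply the sigmoid of xData, following the formula in the comment above (y = 1/(1+exp(-x))). A minimal standalone check:

// sigmoid_check.cpp -- standalone sketch: sigmoid(x) = 1 / (1 + exp(-x))
#include <cmath>
#include <cstdio>

int main()
{
    const float xData[3] = {0.0F, 1.0F, 2.0F};
    for (int i = 0; i < 3; i++)
        std::printf("%.4f\n", 1.0F / (1.0F + std::exp(-xData[i])));  // prints 0.5000, 0.7311, 0.8808
    return 0;
}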
...
@@ -129,70 +98,72 @@ bool TestSigmoid1()
...
@@ -129,70 +98,72 @@ bool TestSigmoid1()
/*
/*
case 2: test Sigmoid function and SigmoidBackward function.
case 2: test Sigmoid function and SigmoidBackward function.
sigmoid function: y = 1/(1+exp(-x))
sigmoid function: y = 1/(1+exp(-x))
backward computation: dE/ds = dE/dy * dy/dx
backward computation:
dE/ds = dE/dy * dy/dx
dy/dx = y * (1 -y)
In this case, LossName=CROSSENTROPY.
*/
*/
bool TestSigmoid2()
bool TestSigmoid2()
{
{
/* a input tensor of size (3) */
/* a input tensor of size (3) */
int sOrder = 1;
int order = 1;
int * sDimSize = new int[sOrder];
int * dimSize = new int[order];
sDimSize[0] = 3;
dimSize[0] = 3;
int sUnitNum = 1;
int unitNum = 1;
for (int i = 0; i < sOrder; i++)
for (int i = 0; i < order; i++)
sUnitNum *= sDimSize[i];
unitNum *= dimSize[i];
DTYPE xData[3] = {0.0F, 1.0F, 2.0F};
DTYPE xData[3] = {0.0F, 1.0F, 2.0F};
DTYPE gData[3] = {0.4F, 0.8F, 1.0F};
DTYPE gData[3] = {0.4F, 0.8F, 1.0F};
DTYPE dedyData[3] = {-0.8F, -1.094F, -1.135F};
DTYPE yAnswer[3] = {0.5F, 0.7311F, 0.8808F};
DTYPE yAnswer[3] = {0.5F, 0.731F, 0.881F};
DTYPE dedyAnswer[3] = {-0.8F, -1.0943F, -1.1353F};
DTYPE dedxAnswer[3] = {-0.2F, -0.215F, -0.119F};
DTYPE dedxAnswer[3] = {-0.2F, -0.2151F, -0.1192F};
/* CPU test */
/* CPU test */
bool cpuTest = true;
bool cpuTest = true;
/* create tensors */
/* create tensors */
XTensor * x = NewTensor(sOrder, sDimSize);
XTensor * x = NewTensor(order, dimSize);
XTensor * y = NewTensor(sOrder, sDimSize);
XTensor * y = NewTensor(order, dimSize);
XTensor * g = NewTensor(sOrder, sDimSize);
XTensor * g = NewTensor(order, dimSize);
XTensor * dedy = NewTensor(sOrder, sDimSize);
XTensor * dedy = NewTensor(order, dimSize);
XTensor * dedx = NewTensor(sOrder, sDimSize);
XTensor * dedx = NewTensor(order, dimSize);
/* initialize variables */
/* initialize variables */
x->SetData(xData, sUnitNum);
x->SetData(xData, unitNum);
g->SetData(gData, sUnitNum);
g->SetData(gData, unitNum);
dedy->SetZeroAll();
y->SetZeroAll();
y->SetZeroAll();
dedy->SetZeroAll();
dedx->SetZeroAll();
dedx->SetZeroAll();
/* call Sigmoid function */
/* call Sigmoid function */
Sigmoid(x, y);
Sigmoid(x, y);
/* initialize variables */
dedy->SetData(dedyData, sUnitNum);
/* call SigmoidBackward function */
/* call SigmoidBackward function */
SigmoidBackward(g, y, x, dedy, dedx, CROSSENTROPY);
SigmoidBackward(g, y, x, dedy, dedx, CROSSENTROPY);
/* check result */
/* check result */
cpuTest = y->CheckData(yAnswer, sUnitNum) && dedx->CheckData(dedxAnswer, sUnitNum);
cpuTest = y->CheckData(yAnswer, unitNum, 1e-4F) && dedx->CheckData(dedxAnswer, unitNum, 1e-4F) && dedy->CheckData(dedyAnswer, unitNum, 1e-4F);
#ifdef USE_CUDA
#ifdef USE_CUDA
/* GPU test */
/* GPU test */
bool gpuTest = true;
bool gpuTest = true;
/* create tensors */
/* create tensors */
XTensor * xGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * yGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * gGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * gGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * dedyGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * dedyGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * dedxGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * dedxGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
/* initialize variables */
xGPU->SetData(xData, sUnitNum);
xGPU->SetData(xData, unitNum);
gGPU->SetData(gData, sUnitNum);
gGPU->SetData(gData, unitNum);
dedyGPU->SetZeroAll();
yGPU->SetZeroAll();
yGPU->SetZeroAll();
dedyGPU->SetZeroAll();
dedxGPU->SetZeroAll();
dedxGPU->SetZeroAll();
/* call Sigmoid function */
/* call Sigmoid function */
...
@@ -202,8 +173,9 @@ bool TestSigmoid2()
...
@@ -202,8 +173,9 @@ bool TestSigmoid2()
SigmoidBackward(gGPU, yGPU, xGPU, dedyGPU, dedxGPU, CROSSENTROPY);
SigmoidBackward(gGPU, yGPU, xGPU, dedyGPU, dedxGPU, CROSSENTROPY);
/* check result */
/* check result */
gpuTest = yGPU->CheckData(yAnswer, sUnitNum) && dedxGPU->CheckData(dedxAnswer, sUnitNum);
gpuTest = yGPU->CheckData(yAnswer, unitNum, 1e-4F) && dedxGPU->CheckData(dedxAnswer, unitNum, 1e-4F) && dedyGPU->CheckData(dedyAnswer, unitNum, 1e-4F);
/* destroy variables */
/* destroy variables */
delete x;
delete x;
delete y;
delete y;
...
@@ -215,7 +187,7 @@ bool TestSigmoid2()
...
@@ -215,7 +187,7 @@ bool TestSigmoid2()
delete gGPU;
delete gGPU;
delete dedxGPU;
delete dedxGPU;
delete dedyGPU;
delete dedyGPU;
delete[] sDimSize;
delete[] dimSize;
return cpuTest && gpuTest;
return cpuTest && gpuTest;
#else
#else
...
@@ -225,7 +197,7 @@ bool TestSigmoid2()
...
@@ -225,7 +197,7 @@ bool TestSigmoid2()
delete g;
delete g;
delete dedx;
delete dedx;
delete dedy;
delete dedy;
delete[] sDimSize;
delete[] dimSize;
return cpuTest;
return cpuTest;
#endif // USE_CUDA
#endif // USE_CUDA
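The expected values in TestSigmoid2 can be reproduced by hand from the formulas in the comment above, under the assumption that the CROSSENTROPY loss used here gives dE/dy = -g/y (which is exactly what the dedyAnswer numbers correspond to). A minimal standalone sketch:

// sigmoid_backward_check.cpp -- standalone sketch for the TestSigmoid2 numbers
#include <cmath>
#include <cstdio>

int main()
{
    const float xData[3] = {0.0F, 1.0F, 2.0F};
    const float gData[3] = {0.4F, 0.8F, 1.0F};
    for (int i = 0; i < 3; i++) {
        float y    = 1.0F / (1.0F + std::exp(-xData[i]));  // yAnswer
        float dedy = -gData[i] / y;                        // assumed dE/dy = -g/y: dedyAnswer
        float dedx = dedy * y * (1.0F - y);                // dE/dx = dE/dy * y * (1 - y): dedxAnswer
        std::printf("y=%.4f dedy=%.4f dedx=%.4f\n", y, dedy, dedx);
    }
    return 0;
}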
...
@@ -252,6 +224,16 @@ bool TestSigmoid()
...
@@ -252,6 +224,16 @@ bool TestSigmoid()
else
else
XPRINT(0, stdout, ">> case 1 passed!\n");
XPRINT(0, stdout, ">> case 1 passed!\n");
/* case 2 test */
caseFlag = TestSigmoid2();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 2 failed!\n");
}
else
XPRINT(0, stdout, ">> case 2 passed!\n");
/* other cases test */
/* other cases test */
/*
/*
TODO!!
TODO!!
...
...
source/test/TSoftmax.cpp
View file @
003def3d
...
@@ -31,68 +31,69 @@ softmax function: y = e^x / \sum_{i} e^{x_i}
...
@@ -31,68 +31,69 @@ softmax function: y = e^x / \sum_{i} e^{x_i}
*/
*/
bool TestSoftmax1()
bool TestSoftmax1()
{
{
/* a input tensor of size (2, 3) */
/* a tensor of size (2, 3) */
int sOrder = 2;
int order = 2;
int * sDimSize = new int[sOrder];
int * dimSize = new int[order];
sDimSize[0] = 2;
dimSize[0] = 2;
sDimSize[1] = 3;
dimSize[1] = 3;
int sUnitNum = 1;
int unitNum = 1;
for (int i = 0; i < sOrder; i++)
for (int i = 0; i < order; i++)
sUnitNum *= sDimSize[i];
unitNum *= dimSize[i];
DTYPE xData[2][3] = { {0.0F, 1.0F, 2.0F},
DTYPE xData[2][3] = { {0.0F, 1.0F, 2.0F},
{0.5F, 0.7F, 1.4F} };
{0.5F, 0.7F, 1.4F} };
DTYPE answer[2][3] = { {0.09003057F, 0.24472848F, 0.66524094F},
DTYPE answer[2][3] = { {0.0900F, 0.2447F, 0.6652F},
{0.21362929F, 0.2609274F, 0.52544326F} };
{0.2136F, 0.2609F, 0.5254F} };
/* CPU test */
/* CPU test */
bool cpuTest = true;
bool cpuTest = true;
/* create tensors */
/* create tensors */
XTensor * x = NewTensor(sOrder, sDimSize);
XTensor * x = NewTensor(order, dimSize);
XTensor * y = NewTensor(sOrder, sDimSize);
XTensor * y = NewTensor(order, dimSize);
/* initialize variables */
/* initialize variables */
x->SetData(xData, sUnitNum);
x->SetData(xData, unitNum);
y->SetZeroAll();
y->SetZeroAll();
/* call Softmax function */
/* call Softmax function */
Softmax(x, y, 1);
Softmax(x, y, 1);
/* check result */
/* check result */
cpuTest = y->CheckData(answer, sUnitNum);
cpuTest = y->CheckData(answer, unitNum, 1e-4F);
#ifdef USE_CUDA
#ifdef USE_CUDA
/* GPU test */
/* GPU test */
bool gpuTest = true;
bool gpuTest = true;
/* create tensors */
/* create tensors */
XTensor * xGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * yGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
/* initialize variables */
xGPU->SetData(xData, sUnitNum);
xGPU->SetData(xData, unitNum);
yGPU->SetZeroAll();
yGPU->SetZeroAll();
/* call Softmax function */
/* call Softmax function */
Softmax(xGPU, yGPU, 1);
Softmax(xGPU, yGPU, 1);
/* check result */
/* check result */
gpuTest = yGPU->CheckData(answer, sUnitNum, 0.001F);
gpuTest = yGPU->CheckData(answer, unitNum, 1e-4F);
/* destroy variables */
/* destroy variables */
delete x;
delete x;
delete y;
delete y;
delete xGPU;
delete xGPU;
delete yGPU;
delete yGPU;
delete[] sDimSize;
delete[] dimSize;
return cpuTest && gpuTest;
return cpuTest && gpuTest;
#else
#else
/* destroy variables */
/* destroy variables */
delete x, y;
delete x;
delete[] sDimSize;
delete y;
delete[] dimSize;
return cpuTest;
return cpuTest;
#endif // USE_CUDA
#endif // USE_CUDA
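The answer matrix in TestSoftmax1 is the row-wise softmax of xData, following the formula in the hunk header above (y = e^x / \sum_{i} e^{x_i}). A minimal standalone check:

// softmax_check.cpp -- standalone sketch: y_i = exp(x_i) / sum_j exp(x_j), computed per row
#include <cmath>
#include <cstdio>

int main()
{
    const float xData[2][3] = { {0.0F, 1.0F, 2.0F}, {0.5F, 0.7F, 1.4F} };
    for (int i = 0; i < 2; i++) {
        float sum = 0.0F;
        for (int j = 0; j < 3; j++)
            sum += std::exp(xData[i][j]);
        for (int j = 0; j < 3; j++)
            std::printf("%.4f ", std::exp(xData[i][j]) / sum);  // 0.0900 0.2447 0.6652 / 0.2136 0.2609 0.5254
        std::printf("\n");
    }
    return 0;
}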
...
@@ -101,36 +102,38 @@ bool TestSoftmax1()
...
@@ -101,36 +102,38 @@ bool TestSoftmax1()
/*
/*
case 2: test SoftmaxBackward function.
case 2: test SoftmaxBackward function.
SoftmaxBackward function: dE/dx_j = -gold_j + y_j
SoftmaxBackward function: dE/dx_j = -gold_j + y_j
In this case, LossName=CROSSENTROPY.
*/
*/
bool TestSoftmax2()
bool TestSoftmax2()
{
{
/* a input tensor of size (2, 3) */
/* a input tensor of size (2, 3) */
int sOrder = 2;
int order = 2;
int * sDimSize = new int[sOrder];
int * dimSize = new int[order];
sDimSize[0] = 1;
dimSize[0] = 1;
sDimSize[1] = 3;
dimSize[1] = 3;
int sUnitNum = 1;
int unitNum = 1;
for (int i = 0; i < sOrder; i++)
for (int i = 0; i < order; i++)
sUnitNum *= sDimSize[i];
unitNum *= dimSize[i];
DTYPE xData[1][3] = { {0.0F, 1.0F, 2.0F} };
DTYPE xData[1][3] = { {0.0F, 1.0F, 2.0F} };
DTYPE gData[1][3] = { {0.0F, 0.0F, 1.0F} };
DTYPE gData[1][3] = { {0.0F, 0.0F, 1.0F} };
DTYPE dedxAnswer[3] = {0.090031F, 0.244728F, -0.334759F};
DTYPE yAnswer[1][3] = { {0.0900F, 0.2447F, 0.6652F} };
DTYPE dedxAnswer[1][3] = {0.0900F, 0.2447F, -0.3347F};
/* CPU test */
/* CPU test */
bool cpuTest = true;
bool cpuTest = true;
/* create tensors */
/* create tensors */
XTensor * x = NewTensor(sOrder, sDimSize);
XTensor * x = NewTensor(order, dimSize);
XTensor * y = NewTensor(sOrder, sDimSize);
XTensor * y = NewTensor(order, dimSize);
XTensor * g = NewTensor(sOrder, sDimSize);
XTensor * g = NewTensor(order, dimSize);
XTensor * dedy = NewTensor(sOrder, sDimSize);
XTensor * dedy = NewTensor(order, dimSize);
XTensor * dedx = NewTensor(sOrder, sDimSize);
XTensor * dedx = NewTensor(order, dimSize);
/* initialize variables */
/* initialize variables */
x->SetData(xData, sUnitNum);
x->SetData(xData, unitNum);
g->SetData(gData, sUnitNum);
g->SetData(gData, unitNum);
y->SetZeroAll();
y->SetZeroAll();
dedx->SetZeroAll();
dedx->SetZeroAll();
dedy->SetZeroAll();
dedy->SetZeroAll();
...
@@ -138,25 +141,27 @@ bool TestSoftmax2()
...
@@ -138,25 +141,27 @@ bool TestSoftmax2()
/* call Softmax function */
/* call Softmax function */
Softmax(x, y, 1);
Softmax(x, y, 1);
/* call SoftmaxBackward function */
SoftmaxBackward(g, y, x, dedy, dedx, 1, CROSSENTROPY);
SoftmaxBackward(g, y, x, dedy, dedx, 1, CROSSENTROPY);
/* check result */
/* check result */
cpuTest = dedx->CheckData(dedxAnswer, sUnitNum, 0.001F);
cpuTest = y->CheckData(yAnswer, unitNum, 1e-4F) && dedx->CheckData(dedxAnswer, unitNum, 1e-4F);
#ifdef USE_CUDA
#ifdef USE_CUDA
/* GPU test */
/* GPU test */
bool gpuTest = true;
bool gpuTest = true;
/* create tensors */
/* create tensors */
XTensor * xGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * yGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * gGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * gGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * dedyGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * dedyGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * dedxGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * dedxGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
/* initialize variables */
xGPU->SetData(xData, sUnitNum);
xGPU->SetData(xData, unitNum);
gGPU->SetData(gData, sUnitNum);
gGPU->SetData(gData, unitNum);
yGPU->SetZeroAll();
yGPU->SetZeroAll();
dedxGPU->SetZeroAll();
dedxGPU->SetZeroAll();
dedyGPU->SetZeroAll();
dedyGPU->SetZeroAll();
...
@@ -168,7 +173,8 @@ bool TestSoftmax2()
...
@@ -168,7 +173,8 @@ bool TestSoftmax2()
SoftmaxBackward(gGPU, yGPU, xGPU, dedyGPU, dedxGPU, 1, CROSSENTROPY);
SoftmaxBackward(gGPU, yGPU, xGPU, dedyGPU, dedxGPU, 1, CROSSENTROPY);
/* check result */
/* check result */
gpuTest = dedxGPU->CheckData(dedxAnswer, sUnitNum, 0.001F);
gpuTest = yGPU->CheckData(yAnswer, unitNum, 1e-4F) && dedxGPU->CheckData(dedxAnswer, unitNum, 1e-4F);
/* destroy variables */
/* destroy variables */
delete x;
delete x;
...
@@ -181,7 +187,7 @@ bool TestSoftmax2()
...
@@ -181,7 +187,7 @@ bool TestSoftmax2()
delete gGPU;
delete gGPU;
delete dedxGPU;
delete dedxGPU;
delete dedyGPU;
delete dedyGPU;
delete[] sDimSize;
delete[] dimSize;
return cpuTest && gpuTest;
return cpuTest && gpuTest;
#else
#else
...
@@ -191,7 +197,7 @@ bool TestSoftmax2()
...
@@ -191,7 +197,7 @@ bool TestSoftmax2()
delete g;
delete g;
delete dedx;
delete dedx;
delete dedy;
delete dedy;
delete[] sDimSize;
delete[] dimSize;
return cpuTest;
return cpuTest;
#endif // USE_CUDA
#endif // USE_CUDA
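The dedxAnswer in TestSoftmax2 matches the formula in the comment above, dE/dx_j = -gold_j + y_j, applied to the softmax of xData. A minimal standalone check:

// softmax_backward_check.cpp -- standalone sketch: with CROSSENTROPY, dE/dx_j = y_j - gold_j
#include <cmath>
#include <cstdio>

int main()
{
    const float xData[3] = {0.0F, 1.0F, 2.0F};
    const float gData[3] = {0.0F, 0.0F, 1.0F};
    float sum = 0.0F;
    for (int j = 0; j < 3; j++)
        sum += std::exp(xData[j]);
    for (int j = 0; j < 3; j++) {
        float y = std::exp(xData[j]) / sum;                        // yAnswer
        std::printf("y=%.4f dedx=%.4f\n", y, y - gData[j]);        // dedxAnswer: 0.0900, 0.2447, -0.3347
    }
    return 0;
}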
...
...
source/test/TSplit.cpp
View file @
003def3d
...
@@ -181,14 +181,20 @@ bool TestSplit2()
...
@@ -181,14 +181,20 @@ bool TestSplit2()
gpuTest = tGPU->CheckData(answer, tUnitNum);
gpuTest = tGPU->CheckData(answer, tUnitNum);
/* destroy variables */
/* destroy variables */
delete s, t, sGPU, tGPU;
delete s;
delete[] sDimSize, tDimSize;
delete t;
delete sGPU;
delete tGPU;
delete[] sDimSize;
delete[] tDimSize;
return cpuTest && gpuTest;
return cpuTest && gpuTest;
#else
#else
/* destroy variables */
/* destroy variables */
delete s, t;
delete s;
delete[] sDimSize, tDimSize;
delete t;
delete[] sDimSize;
delete[] tDimSize;
return cpuTest;
return cpuTest;
#endif // USE_CUDA
#endif // USE_CUDA
...
@@ -295,14 +301,25 @@ bool TestSplit3()
...
@@ -295,14 +301,25 @@ bool TestSplit3()
gpuTest = tGPU1->CheckData(answer1, tUnitNum1) && tGPU2->CheckData(answer2, tUnitNum2);
gpuTest = tGPU1->CheckData(answer1, tUnitNum1) && tGPU2->CheckData(answer2, tUnitNum2);
/* destroy variables */
/* destroy variables */
delete s, t1, t2, sGPU, tGPU1, tGPU2;
delete s;
delete[] sDimSize, tDimSize1, tDimSize2;
delete t1;
delete t2;
delete sGPU;
delete tGPU1;
delete tGPU2;
delete[] sDimSize;
delete[] tDimSize1;
delete[] tDimSize2;
return cpuTest && gpuTest;
return cpuTest && gpuTest;
#else
#else
/* destroy variables */
/* destroy variables */
delete s, t1, t2;
delete s;
delete[] sDimSize, tDimSize1, tDimSize2;
delete t1;
delete t2;
delete[] sDimSize;
delete[] tDimSize1;
delete[] tDimSize2;
return cpuTest;
return cpuTest;
#endif // USE_CUDA
#endif // USE_CUDA
...
...
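The cleanup rewrites in TSplit.cpp (and in the other tests in this commit) fix a real bug rather than style: in C++, "delete s, t;" uses the comma operator, so only s is freed and t leaks. A minimal illustration of why the deletes are split into separate statements:

// comma_delete_pitfall.cpp -- why "delete s, t;" must be split into separate statements
int main()
{
    int * s = new int(1);
    int * t = new int(2);

    // delete s, t;   // parsed as (delete s), t;  t is never freed
    delete s;          // the commit rewrites the cleanup like this instead
    delete t;
    return 0;
}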