Emmay / NiuTrans.Tensor / Commits / 003def3d

Commit 003def3d authored Jul 11, 2018 by xuchen
1. Update test cases. 2. Bug fixed in the MatrixMulBatched function. 3. Bug fixed in the SumByColumnVT function.
parent f12ced64
Showing 15 changed files with 517 additions and 276 deletions.
source/core/arithmetic/MatrixMulBatched.cpp    +3    -3
source/core/arithmetic/SumByColumnVT.cu        +1    -1
source/test/TConcatenate.cpp                   +3    -3
source/test/TCopyIndexed.cpp                   +227  -1
source/test/THardTanH.cpp                      +0    -0
source/test/TIdentity.cpp                      +61   -50
source/test/TLogSoftmax.cpp                    +0    -0
source/test/TLoss.cpp                          +6    -7
source/test/TMatrixMulBatched.cpp              +1    -0
source/test/TRectify.cpp                       +66   -68
source/test/TSetAscendingOrder.cpp             +1    -3
source/test/TSetData.cpp                       +5    -2
source/test/TSigmoid.cpp                       +65   -83
source/test/TSoftmax.cpp                       +53   -47
source/test/TSplit.cpp                         +25   -8
source/core/arithmetic/MatrixMulBatched.cpp

@@ -89,9 +89,9 @@ void MatrixMulBatched(XTensor * a, MATRIX_TRANS_TYPE transposedA,
         void * ap = (char*)a->data + aRealBlockSize * p;
         void * bp = (char*)b->data + bRealBlockSize * p;
         void * cp = (char*)c->data + cRealBlockSize * p;
-        XTensor * ai = new XTensor(2, aDimSize, a->dataType, a->denseRatio, a->mem);
-        XTensor * bi = new XTensor(2, bDimSize, b->dataType, b->denseRatio, b->mem);
-        XTensor * ci = new XTensor(2, cDimSize, c->dataType, c->denseRatio, c->mem);
+        XTensor * ai = NewTensor(2, aDimSize, a->dataType, a->denseRatio, a->devID, a->mem);
+        XTensor * bi = NewTensor(2, bDimSize, b->dataType, b->denseRatio, a->devID, b->mem);
+        XTensor * ci = NewTensor(2, cDimSize, c->dataType, c->denseRatio, a->devID, c->mem);
         ai->data = ap;
         bi->data = bp;
         ci->data = cp;
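This is the MatrixMulBatched fix called out in the commit message: the per-sample 2-D views were previously built with the plain XTensor constructor, which takes no device ID, while the fixed code calls NewTensor with a->devID, presumably so the views are created on the same device as the batched operands before their data pointers are rebound. A minimal sketch of the resulting pattern (the loop bound blockNum and the cleanup are assumptions; the NewTensor call itself is copied from the hunk above):

    /* sketch only: per-sample views over the batched buffer of `a` */
    for (int p = 0; p < blockNum; p++) {
        void * ap = (char*)a->data + aRealBlockSize * p;           /* p-th raw block      */
        XTensor * ai = NewTensor(2, aDimSize, a->dataType,
                                 a->denseRatio, a->devID, a->mem); /* view on a's device  */
        ai->data = ap;   /* rebind the view to the block; no data is copied */
        /* ... run the 2-D multiplication into the matching ci view,
           then detach (ai->data = NULL) and delete the view (assumption) ... */
    }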
source/core/arithmetic/SumByColumnVT.cu

@@ -52,7 +52,7 @@ void KernelADDByColumnVT(DTYPE * a, DTYPE * b, DTYPE * c, int colNum, int rowNum
         DTYPE * bp = b + (rowNum * k + row) * colNum;
         if (colNum % 4 == 0) {
             for (int i = 0; i < colNum; i += 4)
-                sum += bp[i] + bp[i + 1] + b[i + 2] + b[i + 3];
+                sum += bp[i] + bp[i + 1] + bp[i + 2] + bp[i + 3];
         }
         else if (colNum % 2 == 0) {
             for (int i = 0; i < colNum; i += 2)
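This is the SumByColumnVT fix from the commit message: in the unroll-by-4 branch, the last two terms were read through the raw base pointer b instead of the per-row pointer bp, so they always came from the wrong location. With the definitions visible in the hunk, the indexing works out as follows (comment-only sketch):

    /* bp points at the current row: bp = b + (rowNum * k + row) * colNum          */
    /* intended read:  bp[i + 2]  ==  b[(rowNum * k + row) * colNum + i + 2]        */
    /* buggy read:     b[i + 2]   ==  b[i + 2]   (element i + 2 of the first row)   */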
source/test/TConcatenate.cpp

@@ -483,9 +483,9 @@ bool TestConcatenate4()
     delete sGPU1;
     delete sGPU2;
     delete tGPU;
-    delete[] sDimSize1;
-    delete[] sDimSize2;
-    delete[] tDimSize;
+    //delete[] sDimSize1;
+    //delete[] sDimSize2;
+    //delete[] tDimSize;
 
     return cpuTest && gpuTest;
 #else
source/test/TCopyIndexed.cpp

@@ -20,11 +20,12 @@
 */
 
 #include "TCopyIndexed.h"
+#include "../xc/Mycode.h"
 
 namespace nts { // namespace nts(NiuTrans.Tensor)
 
 /*
-case 1 copy indexed sub-tensors
+case 1: copy indexed sub-tensors
 In this case, (3, 2, 3) -> (3, 2, 2), dim = 2, indexSize = 2,
 srcIndex = [0, 2], tgtIndex = [0, 1], copyNum = 1.
 */

@@ -127,6 +128,213 @@ bool TestCopyIndexed1()
 #endif // USE_CUDA
 }
 
+/*
+case 2: copy indexed sub-tensors
+In this case, (3, 2, 3) -> (3, 2, 2), dim = 2, indexSize = 2,
+srcIndex = [0, 2], tgtIndex = [1, 0], copyNum = 1.
+*/
+bool TestCopyIndexed2()
+{
+    /* a input tensor of size (3, 2, 3) */
+    int sOrder = 3;
+    int * sDimSize = new int[sOrder];
+    sDimSize[0] = 3;
+    sDimSize[1] = 2;
+    sDimSize[2] = 3;
+
+    int sUnitNum = 1;
+    for (int i = 0; i < sOrder; i++)
+        sUnitNum *= sDimSize[i];
+
+    /* a output tensor of size (3, 2, 2) */
+    int tOrder = 3;
+    int * tDimSize = new int[tOrder];
+    tDimSize[0] = 3;
+    tDimSize[1] = 2;
+    tDimSize[2] = 2;
+
+    int tUnitNum = 1;
+    for (int i = 0; i < tOrder; i++)
+        tUnitNum *= tDimSize[i];
+
+    DTYPE sData[3][2][3] = { { {0.0F, -1.0F, 2.0F}, {2.0F, 1.0F, 3.0F} },
+                             { {1.0F, 2.0F, 4.0F}, {3.0F, 1.0F, 2.0F} },
+                             { {-1.0F, 3.0F, 2.0F}, {1.0F, -1.0F, 0.0F} } };
+    DTYPE answer[3][2][2] = { { {2.0F, 0.0F}, {3.0F, 2.0F} },
+                              { {4.0F, 1.0F}, {2.0F, 3.0F} },
+                              { {2.0F, -1.0F}, {0.0F, 1.0F} } };
+    int dim = 2;
+    int indexSize = 2;
+    int srcIndex[2] = {0, 2};
+    int tgtIndex[2] = {1, 0};
+    int copyNum = 1;
+
+    /* CPU test */
+    bool cpuTest = true;
+
+    /* create tensors */
+    XTensor * s = NewTensor(sOrder, sDimSize);
+    XTensor * t = NewTensor(tOrder, tDimSize);
+
+    /* initialize variables */
+    s->SetData(sData, sUnitNum);
+    t->SetZeroAll();
+
+    /* call CopyIndexed function */
+    CopyIndexed(s, t, dim, srcIndex, indexSize, tgtIndex, copyNum);
+
+    /* check results */
+    cpuTest = t->CheckData(answer, tUnitNum);
+
+#ifdef USE_CUDA
+    /* GPU test */
+    bool gpuTest = true;
+
+    /* create tensors */
+    XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * tGPU = NewTensor(sOrder, tDimSize, X_FLOAT, 1.0F, 0);
+
+    /* initialize variables */
+    sGPU->SetData(sData, sUnitNum);
+    tGPU->SetZeroAll();
+
+    /* call CopyIndexed function */
+    CopyIndexed(sGPU, tGPU, dim, srcIndex, indexSize, tgtIndex, copyNum);
+
+    /* check results */
+    gpuTest = tGPU->CheckData(answer, tUnitNum);
+
+    /* destroy variables */
+    delete s;
+    delete t;
+    delete sGPU;
+    delete tGPU;
+    delete[] sDimSize;
+    delete[] tDimSize;
+
+    return cpuTest && gpuTest;
+#else
+    /* destroy variables */
+    delete s;
+    delete t;
+    delete[] sDimSize;
+    delete[] tDimSize;
+
+    return cpuTest;
+#endif // USE_CUDA
+}
+
+/*
+case 3: copy indexed sub-tensors
+In this case, (3, 2, 3) -> (3, 2, 2), dim = 2, indexSize = 1,
+srcIndex = [0], tgtIndex = [0], copyNum = 2.
+*/
+bool TestCopyIndexed3()
+{
+    /* a input tensor of size (3, 2, 3) */
+    int sOrder = 3;
+    int * sDimSize = new int[sOrder];
+    sDimSize[0] = 3;
+    sDimSize[1] = 2;
+    sDimSize[2] = 3;
+
+    int sUnitNum = 1;
+    for (int i = 0; i < sOrder; i++)
+        sUnitNum *= sDimSize[i];
+
+    /* a output tensor of size (3, 2, 2) */
+    int tOrder = 3;
+    int * tDimSize = new int[tOrder];
+    tDimSize[0] = 3;
+    tDimSize[1] = 2;
+    tDimSize[2] = 2;
+
+    int tUnitNum = 1;
+    for (int i = 0; i < tOrder; i++)
+        tUnitNum *= tDimSize[i];
+
+    DTYPE sData[3][2][3] = { { {0.0F, -1.0F, 2.0F}, {2.0F, 1.0F, 3.0F} },
+                             { {1.0F, 2.0F, 4.0F}, {3.0F, 1.0F, 2.0F} },
+                             { {-1.0F, 3.0F, 2.0F}, {1.0F, -1.0F, 0.0F} } };
+    DTYPE answer[3][2][2] = { { {0.0F, -1.0F}, {2.0F, 1.0F} },
+                              { {1.0F, 2.0F}, {3.0F, 1.0F} },
+                              { {-1.0F, 3.0F}, {1.0F, -1.0F} } };
+    int dim = 2;
+    int indexSize = 1;
+    int srcIndex[1] = {0};
+    int tgtIndex[1] = {0};
+    int copyNum = 2;
+
+    /* CPU test */
+    bool cpuTest = true;
+
+    /* create tensors */
+    XTensor * s = NewTensor(sOrder, sDimSize);
+    XTensor * t = NewTensor(tOrder, tDimSize);
+
+    /* initialize variables */
+    s->SetData(sData, sUnitNum);
+    t->SetZeroAll();
+
+    /* call CopyIndexed function */
+    CopyIndexed(s, t, dim, srcIndex, indexSize, tgtIndex, copyNum);
+
+    /* check results */
+    cpuTest = t->CheckData(answer, tUnitNum);
+
+#ifdef USE_CUDA
+    /* GPU test */
+    bool gpuTest = true;
+
+    /* create tensors */
+    XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * tGPU = NewTensor(sOrder, tDimSize, X_FLOAT, 1.0F, 0);
+
+    /* initialize variables */
+    sGPU->SetData(sData, sUnitNum);
+    tGPU->SetZeroAll();
+
+    /* call CopyIndexed function */
+    CopyIndexed(sGPU, tGPU, dim, srcIndex, indexSize, tgtIndex, copyNum);
+
+    /* check results */
+    gpuTest = tGPU->CheckData(answer, tUnitNum);
+
+    /* destroy variables */
+    delete s;
+    delete t;
+    delete sGPU;
+    delete tGPU;
+    delete[] sDimSize;
+    delete[] tDimSize;
+
+    return cpuTest && gpuTest;
+#else
+    /* destroy variables */
+    delete s;
+    delete t;
+    delete[] sDimSize;
+    delete[] tDimSize;
+
+    return cpuTest;
+#endif // USE_CUDA
+}
+
 /* other cases */
 /*
 TODO!!

@@ -147,6 +355,24 @@ bool TestCopyIndexed()
     else
         XPRINT(0, stdout, ">> case 1 passed!\n");
 
+    /* case 2 test */
+    caseFlag = TestCopyIndexed2();
+    if (!caseFlag) {
+        returnFlag = false;
+        XPRINT(0, stdout, ">> case 2 failed!\n");
+    }
+    else
+        XPRINT(0, stdout, ">> case 2 passed!\n");
+
+    /* case 3 test */
+    caseFlag = TestCopyIndexed3();
+    if (!caseFlag) {
+        returnFlag = false;
+        XPRINT(0, stdout, ">> case 3 failed!\n");
+    }
+    else
+        XPRINT(0, stdout, ">> case 3 passed!\n");
+
     /* other cases test */
     /*
     TODO!!
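For readers skimming the new cases, the expected outputs follow this indexing rule, inferred from the test data itself (the authoritative semantics live in CopyIndexed, which this commit does not touch):

    /* along dimension dim, for n in [0, indexSize) and m in [0, copyNum):       */
    /*     t[..., tgtIndex[n] + m, ...] = s[..., srcIndex[n] + m, ...]           */
    /* case 2: copyNum = 1, so target slice 1 <- source slice 0,                 */
    /*                         target slice 0 <- source slice 2                  */
    /* case 3: copyNum = 2, so target slices {0, 1} <- source slices {0, 1}      */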
source/test/THardTanH.cpp

(diff collapsed)
source/test/TIdentity.cpp

@@ -30,15 +30,15 @@ Identity function: y = x
 */
 bool TestIdentity1()
 {
-    /* a input tensor of size (2, 3) */
-    int sOrder = 2;
-    int * sDimSize = new int[sOrder];
-    sDimSize[0] = 2;
-    sDimSize[1] = 3;
+    /* a tensor of size (2, 3) */
+    int order = 2;
+    int * dimSize = new int[order];
+    dimSize[0] = 2;
+    dimSize[1] = 3;
 
-    int sUnitNum = 1;
-    for (int i = 0; i < sOrder; i++)
-        sUnitNum *= sDimSize[i];
+    int unitNum = 1;
+    for (int i = 0; i < order; i++)
+        unitNum *= dimSize[i];
 
     DTYPE xData[2][3] = { {0.0F, 1.0F, 2.0F},
                           {0.5F, 0.7F, 1.4F} };

@@ -49,47 +49,50 @@ bool TestIdentity1()
     bool cpuTest = true;
 
     /* create tensors */
-    XTensor * x = NewTensor(sOrder, sDimSize);
-    XTensor * y = NewTensor(sOrder, sDimSize);
+    XTensor * x = NewTensor(order, dimSize);
+    XTensor * y = NewTensor(order, dimSize);
 
     /* initialize variables */
-    x->SetData(xData, sUnitNum);
+    x->SetData(xData, unitNum);
     y->SetZeroAll();
 
     /* call Identity function */
     Identity(x, y);
 
     /* check result */
-    cpuTest = y->CheckData(answer, sUnitNum);
+    cpuTest = y->CheckData(answer, unitNum);
 
 #ifdef USE_CUDA
     /* GPU test */
     bool gpuTest = true;
 
     /* create tensors */
-    XTensor * xGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * yGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+    XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
 
     /* initialize variables */
-    xGPU->SetData(xData, sUnitNum);
+    xGPU->SetData(xData, unitNum);
     yGPU->SetZeroAll();
 
     /* call Identity function */
     Identity(xGPU, yGPU);
 
     /* check result */
-    gpuTest = yGPU->CheckData(answer, sUnitNum);
+    gpuTest = yGPU->CheckData(answer, unitNum);
 
     /* destroy variables */
-    delete x, y;
-    delete xGPU, yGPU;
-    delete[] sDimSize;
+    delete x;
+    delete y;
+    delete xGPU;
+    delete yGPU;
+    delete[] dimSize;
 
     return cpuTest && gpuTest;
 #else
     /* destroy variables */
-    delete x, y;
-    delete[] sDimSize;
+    delete x;
+    delete y;
+    delete[] dimSize;
 
     return cpuTest;
 #endif // USE_CUDA

@@ -98,35 +101,39 @@ bool TestIdentity1()
 /*
 case 2: test IdentityBackward function.
 IdentityBackward function: dE/dx = dE/dy * dy/dx = dE/dy
 In this case, lossName=CROSSENTROPY.
 */
 bool TestIdentity2()
 {
-    int sOrder = 2;
-    int * sDimSize = new int[sOrder];
-    sDimSize[0] = 1;
-    sDimSize[1] = 3;
-
-    int sUnitNum = 1;
-    for (int i = 0; i < sOrder; i++)
-        sUnitNum *= sDimSize[i];
-
-    DTYPE xData[1][3] = { {1.0F, 1.0F, 2.0F} };
-    DTYPE gData[1][3] = { {0.0F, 0.0F, 1.0F} };
+    /* a tensor of size (2, 3) */
+    int order = 2;
+    int * dimSize = new int[order];
+    dimSize[0] = 1;
+    dimSize[1] = 3;
+
+    int unitNum = 1;
+    for (int i = 0; i < order; i++)
+        unitNum *= dimSize[i];
+
+    DTYPE xData[3] = {1.0F, 1.0F, 2.0F};
+    DTYPE gData[3] = {0.0F, 0.0F, 1.0F};
+    DTYPE yAnswer[3] = {1.0F, 1.0F, 2.0F};
+    DTYPE dedyAnswer[3] = {0.0F, 0.0F, -0.5F};
     DTYPE dedxAnswer[3] = {0.0F, 0.0F, -0.5F};
 
     /* CPU test */
     bool cpuTest = true;
 
     /* create tensors */
-    XTensor * x = NewTensor(sOrder, sDimSize);
-    XTensor * y = NewTensor(sOrder, sDimSize);
-    XTensor * g = NewTensor(sOrder, sDimSize);
-    XTensor * dedy = NewTensor(sOrder, sDimSize);
-    XTensor * dedx = NewTensor(sOrder, sDimSize);
+    XTensor * x = NewTensor(order, dimSize);
+    XTensor * y = NewTensor(order, dimSize);
+    XTensor * g = NewTensor(order, dimSize);
+    XTensor * dedy = NewTensor(order, dimSize);
+    XTensor * dedx = NewTensor(order, dimSize);
 
     /* initialize variables */
-    x->SetData(xData, sUnitNum);
-    g->SetData(gData, sUnitNum);
+    x->SetData(xData, unitNum);
+    g->SetData(gData, unitNum);
     y->SetZeroAll();
     dedx->SetZeroAll();
     dedy->SetZeroAll();

@@ -138,22 +145,24 @@ bool TestIdentity2()
     IdentityBackward(g, y, x, dedy, dedx, CROSSENTROPY);
 
     /* check result */
-    cpuTest = dedx->CheckData(dedxAnswer, sUnitNum, 1e-4F);
+    cpuTest = y->CheckData(yAnswer, unitNum, 1e-4F) &&
+              dedx->CheckData(dedxAnswer, unitNum, 1e-4F) &&
+              dedy->CheckData(dedyAnswer, unitNum, 1e-4F);
 
 #ifdef USE_CUDA
     /* GPU test */
     bool gpuTest = true;
 
     /* create tensors */
-    XTensor * xGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * yGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * gGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * dedyGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * dedxGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+    XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+    XTensor * gGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+    XTensor * dedyGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+    XTensor * dedxGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
 
     /* initialize variables */
-    xGPU->SetData(xData, sUnitNum);
-    gGPU->SetData(gData, sUnitNum);
+    xGPU->SetData(xData, unitNum);
+    gGPU->SetData(gData, unitNum);
     yGPU->SetZeroAll();
     dedxGPU->SetZeroAll();
     dedyGPU->SetZeroAll();

@@ -165,7 +174,9 @@ bool TestIdentity2()
     IdentityBackward(gGPU, yGPU, xGPU, dedyGPU, dedxGPU, CROSSENTROPY);
 
     /* check result */
-    gpuTest = dedxGPU->CheckData(dedxAnswer, sUnitNum, 1e-4F);
+    gpuTest = yGPU->CheckData(yAnswer, unitNum, 1e-4F) &&
+              dedxGPU->CheckData(dedxAnswer, unitNum, 1e-4F) &&
+              dedyGPU->CheckData(dedyAnswer, unitNum, 1e-4F);
 
     /* destroy variables */
     delete x;

@@ -178,7 +189,7 @@ bool TestIdentity2()
     delete gGPU;
     delete dedxGPU;
     delete dedyGPU;
-    delete[] sDimSize;
+    delete[] dimSize;
 
     return cpuTest && gpuTest;
 #else

@@ -188,7 +199,7 @@ bool TestIdentity2()
     delete g;
     delete dedx;
     delete dedy;
-    delete[] sDimSize;
+    delete[] dimSize;
 
     return cpuTest;
 #endif // USE_CUDA
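A quick hand check of the new expected values in TestIdentity2 (assuming the CROSSENTROPY backward computes dE/dy_i = -g_i / y_i, the gradient of E = -sum_i g_i * log(y_i), which is what the numbers imply):

    /* x = (1, 1, 2), gold g = (0, 0, 1), identity: y = x                          */
    /* dE/dy_i = -g_i / y_i           = (0, 0, -0.5)                -> dedyAnswer  */
    /* dE/dx_i = dE/dy_i * dy_i/dx_i  = dE/dy_i * 1 = (0, 0, -0.5)  -> dedxAnswer  */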
source/test/TLogSoftmax.cpp

(diff collapsed)
source/test/TLoss.cpp

@@ -20,16 +20,15 @@
 */
 
 #include "../core/math/ScaleAndShift.h"
 #include "../function/Loss.h"
 #include "TLoss.h"
 
 namespace nts { // namespace nts(NiuTrans.Tensor)
 
 /*
-case 1: test LossCompute function
+case 1: test LossCompute function.
 In this case, Loss function name = SQUAREDERROR.
 loss = sum_{i} 0.5*(t_i - y_i)^2,
-where t_i is the gold standard and y_i is the model output
+where t_i is the gold standard and y_i is the model output.
 */
 bool TestLoss1()
 {

@@ -103,10 +102,10 @@ bool TestLoss1()
 }
 
 /*
-case 2: test LossCompute function
+case 2: test LossCompute function.
 In this case, Loss function name = CROSSENTROPY.
 loss = sum_{i} (-t_i * log(y_i))
-where t_i is the gold standard and y_i is the model output
+where t_i is the gold standard and y_i is the model output.
 */
 bool TestLoss2()
 {

@@ -180,10 +179,10 @@ bool TestLoss2()
 }
 
 /*
-case 3: test LossCompute function
+case 3: test LossCompute function.
 In this case, Loss function name = ONEHOTERROR.
 loss = sum_{i} e_i
-where e_i = 0.5*(t_i - y_i)^2 if t_i = 1, e_i = 0 otherwise
+where e_i = 0.5*(t_i - y_i)^2 if t_i = 1, e_i = 0 otherwise.
 */
 bool TestLoss3()
 {
source/test/TMatrixMulBatched.cpp

@@ -19,6 +19,7 @@
 * $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-15
 */
 
+#include "../XTensor.h"
 #include "TMatrixMulBatched.h"
 
 namespace nts { // namespace nts(NiuTrans.Tensor)
source/test/TRectify.cpp

@@ -29,25 +29,15 @@ In this case, y = max(0, x)
 */
 bool TestRectify1()
 {
-    /* a x tensor of size (2, 3) */
-    int xOrder = 2;
-    int * xDimSize = new int[xOrder];
-    xDimSize[0] = 2;
-    xDimSize[1] = 3;
-
-    int xUnitNum = 1;
-    for (int i = 0; i < xOrder; i++)
-        xUnitNum *= xDimSize[i];
-
-    /* a y tensor of size (2, 3) */
-    int yOrder = 2;
-    int * yDimSize = new int[yOrder];
-    yDimSize[0] = 2;
-    yDimSize[1] = 3;
-
-    int yUnitNum = 1;
-    for (int i = 0; i < yOrder; i++)
-        yUnitNum *= yDimSize[i];
+    /* a tensor of size (2, 3) */
+    int order = 2;
+    int * dimSize = new int[order];
+    dimSize[0] = 2;
+    dimSize[1] = 3;
+
+    int unitNum = 1;
+    for (int i = 0; i < order; i++)
+        unitNum *= dimSize[i];
 
     DTYPE xData[2][3] = { {0.0F, -1.0F, 2.0F},
                           {3.0F, -4.0F, -5.0F} };

@@ -58,52 +48,50 @@ bool TestRectify1()
     bool cpuTest = true;
 
     /* create tensors */
-    XTensor * x = NewTensor(xOrder, xDimSize);
-    XTensor * y = NewTensor(yOrder, yDimSize);
+    XTensor * x = NewTensor(order, dimSize);
+    XTensor * y = NewTensor(order, dimSize);
 
     /* initialize variables */
-    x->SetData(xData, xUnitNum);
+    x->SetData(xData, unitNum);
     y->SetZeroAll();
 
     /* call Rectify function */
     Rectify(x, y);
 
     /* check results */
-    cpuTest = y->CheckData(answer, yUnitNum);
+    cpuTest = y->CheckData(answer, unitNum);
 
 #ifdef USE_CUDA
     /* GPU test */
     bool gpuTest = true;
 
     /* create tensor */
-    XTensor * xGPU = NewTensor(xOrder, xDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * yGPU = NewTensor(yOrder, yDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+    XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
 
     /* Initialize variables */
-    xGPU->SetData(xData, xUnitNum);
+    xGPU->SetData(xData, unitNum);
     yGPU->SetZeroAll();
 
     /* call Rectify function */
     Rectify(xGPU, yGPU);
 
     /* check results */
-    gpuTest = yGPU->CheckData(answer, yUnitNum);
+    gpuTest = yGPU->CheckData(answer, unitNum);
 
     /* destroy variables */
     delete x;
     delete y;
     delete xGPU;
     delete yGPU;
-    delete[] xDimSize;
-    delete[] yDimSize;
+    delete[] dimSize;
 
     return cpuTest && gpuTest;
 #else
     /* destroy variables */
     delete x;
     delete y;
-    delete[] xDimSize;
-    delete[] yDimSize;
+    delete[] dimSize;
 
     return cpuTest;
 #endif // USE_CUDA

@@ -117,73 +105,83 @@ In this case, lossName=CROSSENTROPY.
 */
 bool TestRectify2()
 {
-    /* a x tensor of size (2, 3) */
-    int xOrder = 2;
-    int * xDimSize = new int[xOrder];
-    xDimSize[0] = 2;
-    xDimSize[1] = 3;
+    /* a tensor of size (2, 3) */
+    int order = 2;
+    int * dimSize = new int[order];
+    dimSize[0] = 2;
+    dimSize[1] = 3;
 
-    int xUnitNum = 1;
-    for (int i = 0; i < xOrder; i++)
-        xUnitNum *= xDimSize[i];
+    int unitNum = 1;
+    for (int i = 0; i < order; i++)
+        unitNum *= dimSize[i];
 
     DTYPE xData[2][3] = { {1.0F, 1.0F, 2.0F},
                           {2.0F, 4.0F, 5.0F} };
-    DTYPE yData[2][3] = { {1.0F, 1.0F, 2.0F},
-                          {2.0F, 4.0F, 5.0F} };
     DTYPE goldData[2][3] = { {1.0F, 1.0F, 1.0F},
                              {1.0F, 1.0F, 1.0F} };
-    DTYPE dedyData[2][3] = { {-1.0F, -1.0F, -0.5F},
-                             {-0.5F, -0.25F, -0.2F} };
-    DTYPE answer[2][3] = { {-1.0F, -1.0F, -0.5F},
-                           {-0.5F, -0.25F, -0.2F} };
+    DTYPE yAnswer[2][3] = { {1.0F, 1.0F, 2.0F},
+                            {2.0F, 4.0F, 5.0F} };
+    DTYPE dedyAnswer[2][3] = { {-1.0F, -1.0F, -0.5F},
+                               {-0.5F, -0.25F, -0.2F} };
+    DTYPE dedxAnswer[2][3] = { {-1.0F, -1.0F, -0.5F},
+                               {-0.5F, -0.25F, -0.2F} };
 
     /* CPU test */
     bool cpuTest = true;
 
     /* create tensors */
-    XTensor * x = NewTensor(xOrder, xDimSize);
-    XTensor * y = NewTensor(xOrder, xDimSize);
-    XTensor * gold = NewTensor(xOrder, xDimSize);
-    XTensor * dedy = NewTensor(xOrder, xDimSize);
-    XTensor * dedx = NewTensor(xOrder, xDimSize);
+    XTensor * x = NewTensor(order, dimSize);
+    XTensor * y = NewTensor(order, dimSize);
+    XTensor * gold = NewTensor(order, dimSize);
+    XTensor * dedy = NewTensor(order, dimSize);
+    XTensor * dedx = NewTensor(order, dimSize);
 
     /* initialize variables */
-    x->SetData(xData, xUnitNum);
-    y->SetData(yData, xUnitNum);
-    gold->SetData(goldData, xUnitNum);
-    dedy->SetData(dedyData, xUnitNum);
+    x->SetData(xData, unitNum);
+    gold->SetData(goldData, unitNum);
+    y->SetZeroAll();
+    dedy->SetZeroAll();
     dedx->SetZeroAll();
 
     /* call Rectify function */
     Rectify(x, y);
 
     /* call RectifyBackward function */
-    RectifyBackward(gold, y, x, dedy, dedx, NOLOSS);
+    RectifyBackward(gold, y, x, dedy, dedx, CROSSENTROPY);
 
     /* check results */
-    cpuTest = dedx->CheckData(answer, xUnitNum);
+    cpuTest = y->CheckData(yAnswer, unitNum, 1e-4F) &&
+              dedx->CheckData(dedxAnswer, unitNum, 1e-4F) &&
+              dedy->CheckData(dedyAnswer, unitNum, 1e-4F);
 
 #ifdef USE_CUDA
     /* GPU test */
     bool gpuTest = true;
 
     /* create tensors */
-    XTensor * xGPU = NewTensor(xOrder, xDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * yGPU = NewTensor(xOrder, xDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * goldGPU = NewTensor(xOrder, xDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * dedyGPU = NewTensor(xOrder, xDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * dedxGPU = NewTensor(xOrder, xDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+    XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+    XTensor * goldGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+    XTensor * dedyGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+    XTensor * dedxGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
 
     /* initialize variables */
-    xGPU->SetData(xData, xUnitNum);
-    yGPU->SetData(yData, xUnitNum);
-    goldGPU->SetData(goldData, xUnitNum);
-    dedyGPU->SetData(dedyData, xUnitNum);
+    xGPU->SetData(xData, unitNum);
+    goldGPU->SetData(goldData, unitNum);
+    yGPU->SetZeroAll();
+    dedyGPU->SetZeroAll();
     dedxGPU->SetZeroAll();
 
     /* call Rectify function */
     Rectify(xGPU, yGPU);
 
     /* call rectifybackward function */
-    RectifyBackward(goldGPU, yGPU, xGPU, dedyGPU, dedxGPU, NOLOSS);
+    RectifyBackward(goldGPU, yGPU, xGPU, dedyGPU, dedxGPU, CROSSENTROPY);
 
     /* check results */
-    gpuTest = dedxGPU->CheckData(answer, xUnitNum);
+    gpuTest = yGPU->CheckData(yAnswer, unitNum, 1e-4F) &&
+              dedxGPU->CheckData(dedxAnswer, unitNum, 1e-4F) &&
+              dedyGPU->CheckData(dedyAnswer, unitNum, 1e-4F);
 
     /* destroy variables */
     delete x;

@@ -196,7 +194,7 @@ bool TestRectify2()
     delete dedyGPU;
     delete dedxGPU;
     delete goldGPU;
-    delete[] xDimSize;
+    delete[] dimSize;
 
     return cpuTest && gpuTest;
 #else

@@ -206,7 +204,7 @@ bool TestRectify2()
     delete dedy;
     delete dedx;
     delete gold;
-    delete[] xDimSize;
+    delete[] dimSize;
 
     return cpuTest;
 #endif // USE_CUDA

@@ -220,7 +218,7 @@ TODO!!
 /* test for Rectify Function */
 bool TestRectify()
 {
-    XPRINT(0, stdout, "[TEST RECTIFY] test rectify and its backward computation \n");
+    XPRINT(0, stdout, "[TEST RECTIFY] rectify function and its backward computation \n");
     bool returnFlag = true, caseFlag = true;
 
     /* case 1 test */
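TestRectify2 no longer feeds dE/dy in by hand; both gradients are now derived from gold and x. The expected values can be re-derived as follows (again assuming dE/dy_i = -gold_i / y_i for CROSSENTROPY, which matches the numbers):

    /* x = (1, 1, 2, 2, 4, 5), gold = 1 everywhere, y = max(0, x) = x              */
    /* dE/dy_i = -gold_i / y_i = (-1, -1, -0.5, -0.5, -0.25, -0.2)  -> dedyAnswer  */
    /* dE/dx_i = dE/dy_i * [x_i > 0] = dE/dy_i                      -> dedxAnswer  */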
source/test/TSetAscendingOrder.cpp

@@ -23,8 +23,7 @@
 namespace nts { // namespace nts(NiuTrans.Tensor)
 
-/* case 1: set the cell to the ascending order along a given dimension.
-*/
+/* case 1: set the cell to the ascending order along a given dimension. */
 bool TestSetAscendingOrder1()
 {
     /* a input tensor of size (2, 4) */

@@ -50,7 +49,6 @@ bool TestSetAscendingOrder1()
     s->SetZeroAll();
 
     /* call SetAscendingOrder function */
     s->SetAscendingOrder(1);
 
     /* check results */
source/test/TSetData.cpp

@@ -23,7 +23,10 @@
 namespace nts { // namespace nts(NiuTrans.Tensor)
 
-/* case 1: set the cell to the ascending order along a given dimension. */
+/* 
+case 1: test SetDataRand function.
+set the tensor items by a uniform distribution in range [lower, upper].
+*/
 bool TestSetData1()
 {
     /* a input tensor of size (2, 4) */

@@ -44,7 +47,7 @@ bool TestSetData1()
     /* create tensors */
     XTensor * s = NewTensor(sOrder, sDimSize);
 
-    /* call SetData function */
+    /* call SetDataRand function */
     s->SetDataRand(0.0, 1.0);
 
     /* check results */
source/test/TSigmoid.cpp

@@ -25,102 +25,71 @@
 namespace nts { // namespace nts(NiuTrans.Tensor)
 
 /* 
-case 1: test Sigmoid function and SigmoidBackward function.
+case 1: test Sigmoid function.
 sigmoid function: y = 1/(1+exp(-x))
-backward computation: dE/ds = dE/dy * dy/dx
 */
 bool TestSigmoid1()
 {
     /* a input tensor of size (3) */
-    int sOrder = 1;
-    int * sDimSize = new int[sOrder];
-    sDimSize[0] = 3;
+    int order = 1;
+    int * dimSize = new int[order];
+    dimSize[0] = 3;
 
-    int sUnitNum = 1;
-    for (int i = 0; i < sOrder; i++)
-        sUnitNum *= sDimSize[i];
+    int unitNum = 1;
+    for (int i = 0; i < order; i++)
+        unitNum *= dimSize[i];
 
     DTYPE xData[3] = {0.0F, 1.0F, 2.0F};
-    DTYPE gData[3] = {0.4F, 0.8F, 1.0F};
-    DTYPE dedyData[3] = {-0.8F, -1.094F, -1.135F};
-    DTYPE yAnswer[3] = {0.5F, 0.731F, 0.881F};
-    DTYPE dedxAnswer[3] = {-0.2F, -0.215F, -0.119F};
+    DTYPE answer[3] = {0.5F, 0.7311F, 0.8808F};
 
     /* CPU test */
     bool cpuTest = true;
 
     /* create tensors */
-    XTensor * x = NewTensor(sOrder, sDimSize);
-    XTensor * y = NewTensor(sOrder, sDimSize);
-    XTensor * g = NewTensor(sOrder, sDimSize);
-    XTensor * dedy = NewTensor(sOrder, sDimSize);
-    XTensor * dedx = NewTensor(sOrder, sDimSize);
+    XTensor * x = NewTensor(order, dimSize);
+    XTensor * y = NewTensor(order, dimSize);
 
     /* initialize variables */
-    x->SetData(xData, sUnitNum);
-    g->SetData(gData, sUnitNum);
-    dedy->SetData(dedyData, sUnitNum);
+    x->SetData(xData, unitNum);
     y->SetZeroAll();
-    dedx->SetZeroAll();
 
     /* call Sigmoid function */
     Sigmoid(x, y);
 
-    /* call SigmoidBackward function */
-    SigmoidBackward(g, y, x, dedy, dedx, NOLOSS);
-
     /* check result */
-    cpuTest = y->CheckData(yAnswer, sUnitNum, 0.001F) &&
-              dedx->CheckData(dedxAnswer, sUnitNum, 0.001F);
+    cpuTest = y->CheckData(answer, unitNum, 1e-4F);
 
 #ifdef USE_CUDA
     /* GPU test */
     bool gpuTest = true;
 
     /* create tensors */
-    XTensor * xGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * yGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * gGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * dedyGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * dedxGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+    XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
 
     /* initialize variables */
-    xGPU->SetData(xData, sUnitNum);
-    gGPU->SetData(gData, sUnitNum);
-    dedyGPU->SetData(dedyData, sUnitNum);
+    xGPU->SetData(xData, unitNum);
     yGPU->SetZeroAll();
-    dedxGPU->SetZeroAll();
 
     /* call Sigmoid function */
     Sigmoid(xGPU, yGPU);
 
-    /* call SigmoidBackward function */
-    SigmoidBackward(gGPU, yGPU, xGPU, dedyGPU, dedxGPU, NOLOSS);
-
     /* check result */
-    gpuTest = yGPU->CheckData(yAnswer, sUnitNum, 0.001F) &&
-              dedxGPU->CheckData(dedxAnswer, sUnitNum, 0.001F);
+    gpuTest = yGPU->CheckData(answer, unitNum, 1e-4F);
 
     /* destroy variables */
     delete x;
     delete y;
-    delete g;
-    delete dedx;
-    delete dedy;
     delete xGPU;
     delete yGPU;
-    delete gGPU;
-    delete dedxGPU;
-    delete dedyGPU;
-    delete[] sDimSize;
+    delete[] dimSize;
 
     return cpuTest && gpuTest;
 #else
     /* destroy variables */
     delete x;
     delete y;
-    delete g;
-    delete dedx;
-    delete dedy;
-    delete[] sDimSize;
+    delete[] dimSize;
 
     return cpuTest;
 #endif // USE_CUDA

@@ -129,70 +98,72 @@ bool TestSigmoid1()
 /*
 case 2: test Sigmoid function and SigmoidBackward function.
 sigmoid function: y = 1/(1+exp(-x))
-backward computation: dE/ds = dE/dy * dy/dx
+backward computation: 
+dE/ds = dE/dy * dy/dx
+dy/dx = y * (1 -y)
 In this case, LossName=CROSSENTROPY.
 */
 bool TestSigmoid2()
 {
     /* a input tensor of size (3) */
-    int sOrder = 1;
-    int * sDimSize = new int[sOrder];
-    sDimSize[0] = 3;
+    int order = 1;
+    int * dimSize = new int[order];
+    dimSize[0] = 3;
 
-    int sUnitNum = 1;
-    for (int i = 0; i < sOrder; i++)
-        sUnitNum *= sDimSize[i];
+    int unitNum = 1;
+    for (int i = 0; i < order; i++)
+        unitNum *= dimSize[i];
 
     DTYPE xData[3] = {0.0F, 1.0F, 2.0F};
     DTYPE gData[3] = {0.4F, 0.8F, 1.0F};
-    DTYPE dedyData[3] = {-0.8F, -1.094F, -1.135F};
-    DTYPE yAnswer[3] = {0.5F, 0.731F, 0.881F};
-    DTYPE dedxAnswer[3] = {-0.2F, -0.215F, -0.119F};
+    DTYPE yAnswer[3] = {0.5F, 0.7311F, 0.8808F};
+    DTYPE dedyAnswer[3] = {-0.8F, -1.0943F, -1.1353F};
+    DTYPE dedxAnswer[3] = {-0.2F, -0.2151F, -0.1192F};
 
     /* CPU test */
     bool cpuTest = true;
 
     /* create tensors */
-    XTensor * x = NewTensor(sOrder, sDimSize);
-    XTensor * y = NewTensor(sOrder, sDimSize);
-    XTensor * g = NewTensor(sOrder, sDimSize);
-    XTensor * dedy = NewTensor(sOrder, sDimSize);
-    XTensor * dedx = NewTensor(sOrder, sDimSize);
+    XTensor * x = NewTensor(order, dimSize);
+    XTensor * y = NewTensor(order, dimSize);
+    XTensor * g = NewTensor(order, dimSize);
+    XTensor * dedy = NewTensor(order, dimSize);
+    XTensor * dedx = NewTensor(order, dimSize);
 
     /* initialize variables */
-    x->SetData(xData, sUnitNum);
-    g->SetData(gData, sUnitNum);
-    dedy->SetZeroAll();
+    x->SetData(xData, unitNum);
+    g->SetData(gData, unitNum);
+    y->SetZeroAll();
+    dedy->SetZeroAll();
     dedx->SetZeroAll();
 
     /* call Sigmoid function */
     Sigmoid(x, y);
 
-    /* initialize variables */
-    dedy->SetData(dedyData, sUnitNum);
-
     /* call SigmoidBackward function */
     SigmoidBackward(g, y, x, dedy, dedx, CROSSENTROPY);
 
     /* check result */
-    cpuTest = y->CheckData(yAnswer, sUnitNum) &&
-              dedx->CheckData(dedxAnswer, sUnitNum);
+    cpuTest = y->CheckData(yAnswer, unitNum, 1e-4F) &&
+              dedx->CheckData(dedxAnswer, unitNum, 1e-4F) &&
+              dedy->CheckData(dedyAnswer, unitNum, 1e-4F);
 
 #ifdef USE_CUDA
     /* GPU test */
     bool gpuTest = true;
 
     /* create tensors */
-    XTensor * xGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * yGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * gGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * dedyGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * dedxGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+    XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+    XTensor * gGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+    XTensor * dedyGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+    XTensor * dedxGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
 
     /* initialize variables */
-    xGPU->SetData(xData, sUnitNum);
-    gGPU->SetData(gData, sUnitNum);
-    dedyGPU->SetZeroAll();
+    xGPU->SetData(xData, unitNum);
+    gGPU->SetData(gData, unitNum);
+    yGPU->SetZeroAll();
+    dedyGPU->SetZeroAll();
     dedxGPU->SetZeroAll();
 
     /* call Sigmoid function */

@@ -202,8 +173,9 @@ bool TestSigmoid2()
     SigmoidBackward(gGPU, yGPU, xGPU, dedyGPU, dedxGPU, CROSSENTROPY);
 
     /* check result */
-    gpuTest = yGPU->CheckData(yAnswer, sUnitNum) &&
-              dedxGPU->CheckData(dedxAnswer, sUnitNum);
+    gpuTest = yGPU->CheckData(yAnswer, unitNum, 1e-4F) &&
+              dedxGPU->CheckData(dedxAnswer, unitNum, 1e-4F) &&
+              dedyGPU->CheckData(dedyAnswer, unitNum, 1e-4F);
 
     /* destroy variables */
     delete x;
     delete y;

@@ -215,7 +187,7 @@ bool TestSigmoid2()
     delete gGPU;
     delete dedxGPU;
     delete dedyGPU;
-    delete[] sDimSize;
+    delete[] dimSize;
 
     return cpuTest && gpuTest;
 #else

@@ -225,7 +197,7 @@ bool TestSigmoid2()
     delete g;
     delete dedx;
     delete dedy;
-    delete[] sDimSize;
+    delete[] dimSize;
 
     return cpuTest;
 #endif // USE_CUDA

@@ -252,6 +224,16 @@ bool TestSigmoid()
     else
         XPRINT(0, stdout, ">> case 1 passed!\n");
 
+    /* case 2 test */
+    caseFlag = TestSigmoid2();
+    if (!caseFlag) {
+        returnFlag = false;
+        XPRINT(0, stdout, ">> case 2 failed!\n");
+    }
+    else
+        XPRINT(0, stdout, ">> case 2 passed!\n");
+
     /* other cases test */
     /*
     TODO!!
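As a standalone cross-check of the new expected values in TestSigmoid2, the following small program reproduces yAnswer, dedyAnswer and dedxAnswer. It is not part of the test suite, and it assumes the CROSSENTROPY backward computes dE/dy_i = -g_i / y_i, i.e. the gradient of E = -sum_i g_i * log(y_i), which is what the test numbers imply:

    #include <cmath>
    #include <cstdio>

    int main()
    {
        const double x[3] = {0.0, 1.0, 2.0};
        const double g[3] = {0.4, 0.8, 1.0};
        for (int i = 0; i < 3; i++) {
            double y    = 1.0 / (1.0 + std::exp(-x[i]));  /* sigmoid forward              */
            double dedy = -g[i] / y;                      /* cross-entropy gradient wrt y */
            double dedx = dedy * y * (1.0 - y);           /* chain rule: dy/dx = y(1 - y) */
            std::printf("%.4f %.4f %.4f\n", y, dedy, dedx);
        }
        return 0;  /* prints 0.5000 -0.8000 -0.2000
                             0.7311 -1.0943 -0.2151
                             0.8808 -1.1353 -0.1192 */
    }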
source/test/TSoftmax.cpp

@@ -31,68 +31,69 @@ softmax function: y = e^x / \sum_{i} e^{x_i}
 */
 bool TestSoftmax1()
 {
-    /* a input tensor of size (2, 3) */
-    int sOrder = 2;
-    int * sDimSize = new int[sOrder];
-    sDimSize[0] = 2;
-    sDimSize[1] = 3;
+    /* a tensor of size (2, 3) */
+    int order = 2;
+    int * dimSize = new int[order];
+    dimSize[0] = 2;
+    dimSize[1] = 3;
 
-    int sUnitNum = 1;
-    for (int i = 0; i < sOrder; i++)
-        sUnitNum *= sDimSize[i];
+    int unitNum = 1;
+    for (int i = 0; i < order; i++)
+        unitNum *= dimSize[i];
 
     DTYPE xData[2][3] = { {0.0F, 1.0F, 2.0F},
                           {0.5F, 0.7F, 1.4F} };
-    DTYPE answer[2][3] = { {0.09003057F, 0.24472848F, 0.66524094F},
-                           {0.21362929F, 0.2609274F, 0.52544326F} };
+    DTYPE answer[2][3] = { {0.0900F, 0.2447F, 0.6652F},
+                           {0.2136F, 0.2609F, 0.5254F} };
 
     /* CPU test */
     bool cpuTest = true;
 
     /* create tensors */
-    XTensor * x = NewTensor(sOrder, sDimSize);
-    XTensor * y = NewTensor(sOrder, sDimSize);
+    XTensor * x = NewTensor(order, dimSize);
+    XTensor * y = NewTensor(order, dimSize);
 
     /* initialize variables */
-    x->SetData(xData, sUnitNum);
+    x->SetData(xData, unitNum);
     y->SetZeroAll();
 
     /* call Softmax function */
     Softmax(x, y, 1);
 
     /* check result */
-    cpuTest = y->CheckData(answer, sUnitNum);
+    cpuTest = y->CheckData(answer, unitNum, 1e-4F);
 
 #ifdef USE_CUDA
     /* GPU test */
     bool gpuTest = true;
 
     /* create tensors */
-    XTensor * xGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * yGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+    XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
 
     /* initialize variables */
-    xGPU->SetData(xData, sUnitNum);
+    xGPU->SetData(xData, unitNum);
     yGPU->SetZeroAll();
 
     /* call Softmax function */
     Softmax(xGPU, yGPU, 1);
 
     /* check result */
-    gpuTest = yGPU->CheckData(answer, sUnitNum, 0.001F);
+    gpuTest = yGPU->CheckData(answer, unitNum, 1e-4F);
 
     /* destroy variables */
     delete x;
     delete y;
     delete xGPU;
     delete yGPU;
-    delete[] sDimSize;
+    delete[] dimSize;
 
     return cpuTest && gpuTest;
 #else
     /* destroy variables */
-    delete x, y;
-    delete[] sDimSize;
+    delete x;
+    delete y;
+    delete[] dimSize;
 
     return cpuTest;
 #endif // USE_CUDA

@@ -101,36 +102,38 @@ bool TestSoftmax1()
 /*
 case 2: test SoftmaxBackward function.
 SoftmaxBackward function: dE/dx_j = -gold_j + y_j
 In this case, LossName=CROSSENTROPY.
 */
 bool TestSoftmax2()
 {
     /* a input tensor of size (2, 3) */
-    int sOrder = 2;
-    int * sDimSize = new int[sOrder];
-    sDimSize[0] = 1;
-    sDimSize[1] = 3;
+    int order = 2;
+    int * dimSize = new int[order];
+    dimSize[0] = 1;
+    dimSize[1] = 3;
 
-    int sUnitNum = 1;
-    for (int i = 0; i < sOrder; i++)
-        sUnitNum *= sDimSize[i];
+    int unitNum = 1;
+    for (int i = 0; i < order; i++)
+        unitNum *= dimSize[i];
 
     DTYPE xData[1][3] = { {0.0F, 1.0F, 2.0F} };
     DTYPE gData[1][3] = { {0.0F, 0.0F, 1.0F} };
-    DTYPE dedxAnswer[3] = {0.090031F, 0.244728F, -0.334759F};
+    DTYPE yAnswer[1][3] = { {0.0900F, 0.2447F, 0.6652F} };
+    DTYPE dedxAnswer[1][3] = {0.0900F, 0.2447F, -0.3347F};
 
     /* CPU test */
     bool cpuTest = true;
 
     /* create tensors */
-    XTensor * x = NewTensor(sOrder, sDimSize);
-    XTensor * y = NewTensor(sOrder, sDimSize);
-    XTensor * g = NewTensor(sOrder, sDimSize);
-    XTensor * dedy = NewTensor(sOrder, sDimSize);
-    XTensor * dedx = NewTensor(sOrder, sDimSize);
+    XTensor * x = NewTensor(order, dimSize);
+    XTensor * y = NewTensor(order, dimSize);
+    XTensor * g = NewTensor(order, dimSize);
+    XTensor * dedy = NewTensor(order, dimSize);
+    XTensor * dedx = NewTensor(order, dimSize);
 
     /* initialize variables */
-    x->SetData(xData, sUnitNum);
-    g->SetData(gData, sUnitNum);
+    x->SetData(xData, unitNum);
+    g->SetData(gData, unitNum);
     y->SetZeroAll();
     dedx->SetZeroAll();
     dedy->SetZeroAll();

@@ -138,25 +141,27 @@ bool TestSoftmax2()
     /* call Softmax function */
     Softmax(x, y, 1);
 
     /* call SoftmaxBackward function */
     SoftmaxBackward(g, y, x, dedy, dedx, 1, CROSSENTROPY);
 
     /* check result */
-    cpuTest = dedx->CheckData(dedxAnswer, sUnitNum, 0.001F);
+    cpuTest = y->CheckData(yAnswer, unitNum, 1e-4F) &&
+              dedx->CheckData(dedxAnswer, unitNum, 1e-4F);
 
 #ifdef USE_CUDA
     /* GPU test */
     bool gpuTest = true;
 
     /* create tensors */
-    XTensor * xGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * yGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * gGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * dedyGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * dedxGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+    XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+    XTensor * gGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+    XTensor * dedyGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+    XTensor * dedxGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
 
     /* initialize variables */
-    xGPU->SetData(xData, sUnitNum);
-    gGPU->SetData(gData, sUnitNum);
+    xGPU->SetData(xData, unitNum);
+    gGPU->SetData(gData, unitNum);
     yGPU->SetZeroAll();
     dedxGPU->SetZeroAll();
     dedyGPU->SetZeroAll();

@@ -168,7 +173,8 @@ bool TestSoftmax2()
     SoftmaxBackward(gGPU, yGPU, xGPU, dedyGPU, dedxGPU, 1, CROSSENTROPY);
 
     /* check result */
-    gpuTest = dedxGPU->CheckData(dedxAnswer, sUnitNum, 0.001F);
+    gpuTest = yGPU->CheckData(yAnswer, unitNum, 1e-4F) &&
+              dedxGPU->CheckData(dedxAnswer, unitNum, 1e-4F);
 
     /* destroy variables */
     delete x;

@@ -181,7 +187,7 @@ bool TestSoftmax2()
     delete gGPU;
     delete dedxGPU;
     delete dedyGPU;
-    delete[] sDimSize;
+    delete[] dimSize;
 
     return cpuTest && gpuTest;
 #else

@@ -191,7 +197,7 @@ bool TestSoftmax2()
     delete g;
     delete dedx;
     delete dedy;
-    delete[] sDimSize;
+    delete[] dimSize;
 
     return cpuTest;
 #endif // USE_CUDA
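The rounded softmax answers are easy to re-derive. For x = (0, 1, 2), which is the first row of case 1 and the input of case 2 (where gold = (0, 0, 1)), using the backward rule stated in the case-2 comment:

    /* y_j = exp(x_j) / sum_i exp(x_i)  =  (0.0900, 0.2447, 0.6652)                 */
    /* dE/dx_j = y_j - gold_j           =  (0.0900, 0.2447, -0.3348)                */
    /* the test stores the last value as -0.3347F, within its 1e-4F tolerance       */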
source/test/TSplit.cpp

@@ -181,14 +181,20 @@ bool TestSplit2()
     gpuTest = tGPU->CheckData(answer, tUnitNum);
 
     /* destroy variables */
-    delete s, t, sGPU, tGPU;
-    delete[] sDimSize, tDimSize;
+    delete s;
+    delete t;
+    delete sGPU;
+    delete tGPU;
+    delete[] sDimSize;
+    delete[] tDimSize;
 
     return cpuTest && gpuTest;
 #else
     /* destroy variables */
-    delete s, t;
-    delete[] sDimSize, tDimSize;
+    delete s;
+    delete t;
+    delete[] sDimSize;
+    delete[] tDimSize;
 
     return cpuTest;
 #endif // USE_CUDA

@@ -295,14 +301,25 @@ bool TestSplit3()
     gpuTest = tGPU1->CheckData(answer1, tUnitNum1) &&
               tGPU2->CheckData(answer2, tUnitNum2);
 
     /* destroy variables */
-    delete s, t1, t2, sGPU, tGPU1, tGPU2;
-    delete[] sDimSize, tDimSize1, tDimSize2;
+    delete s;
+    delete t1;
+    delete t2;
+    delete sGPU;
+    delete tGPU1;
+    delete tGPU2;
+    delete[] sDimSize;
+    delete[] tDimSize1;
+    delete[] tDimSize2;
 
     return cpuTest && gpuTest;
 #else
     /* destroy variables */
-    delete s, t1, t2;
-    delete[] sDimSize, tDimSize1, tDimSize2;
+    delete s;
+    delete t1;
+    delete t2;
+    delete[] sDimSize;
+    delete[] tDimSize1;
+    delete[] tDimSize2;
 
     return cpuTest;
 #endif // USE_CUDA