Skip to content
项目
群组
代码片段
帮助
当前项目
正在载入...
登录 / 注册
切换导航面板
T
Tensor.LowPrecision
概览
Overview
Details
Activity
Cycle Analytics
版本库
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
问题
0
Issues
0
列表
Board
标记
里程碑
合并请求
0
Merge Requests
0
CI / CD
CI / CD
流水线
作业
日程表
图表
维基
Wiki
代码片段
Snippets
成员
Collapse sidebar
Close sidebar
活动
图像
聊天
创建新问题
作业
提交
Issue Boards
Open sidebar
魏冰浩
Tensor.LowPrecision
Commits
2ffea05e
Commit
2ffea05e
authored
5 years ago
by
linye
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
update hardtanh
parent
daf4765a
隐藏空白字符变更
内嵌
并排
正在显示
2 个修改的文件
包含
114 行增加
和
2 行删除
+114
-2
source/sample/fnnlm/FNNLM.cpp
+1
-1
source/tensor/test/THardTanH.cpp
+113
-1
没有找到文件。
source/sample/fnnlm/FNNLM.cpp
查看文件 @
2ffea05e
...
...
@@ -783,7 +783,7 @@ void Forward(XTensor inputs[], XTensor &output, FNNModel &model, FNNNet &net)
InitModelTensor2D
(
embedding
,
batchSize
,
model
.
eSize
,
model
);
/* generate word embedding of position i:
embedding = input * w
*/
embedding = input * w */
_MatrixMul
(
&
input
,
X_NOTRANS
,
&
w
,
X_NOTRANS
,
&
embedding
);
eList
.
Add
(
&
net
.
embeddings
[
i
]);
...
...
This diff is collapsed.
Click to expand it.
source/tensor/test/THardTanH.cpp
查看文件 @
2ffea05e
...
...
@@ -17,7 +17,7 @@
/*
* $Created by: Lin Ye (email: linye2015@outlook.com) 2018-06-20
* $Update by: Lin Ye (email: linye2015@outlook.com) 2019-07-
06
float16 added
* $Update by: Lin Ye (email: linye2015@outlook.com) 2019-07-
15
float16 added
*/
#include "../XTensor.h"
...
...
@@ -297,6 +297,108 @@ bool TestHardTanH3()
#endif // USE_CUDA
}
/*
case 4: float16 test backward computation of HardTanH function.
dE/dx = dE/dy * dy/dx
hard tanh: y =  1    if x > 1
                x    if -1 <= x <= 1
               -1    if x < -1
and dy/dx =  1    if -1 <= x <= 1
             0    otherwise
In this case, lossName=SQUAREDERROR.
Returns true when every sub-test (CPU and, when built with CUDA, GPU
float16) matches the hand-computed answers.
*/
bool TestHardTanH4()
{
    /* a tensor of size (2, 3) */
    int order = 2;
    /* fixed-size stack array instead of the raw new[]/delete[] pair:
       the shape is a compile-time constant, so heap allocation only
       added a leak risk and two cleanup statements */
    int dimSize[2] = {2, 3};

    int unitNum = 1;
    for (int i = 0; i < order; i++)
        unitNum *= dimSize[i];

    /* forward input */
    DTYPE xData[2][3] = { {0.5F, -1.0F,  2.0F},
                          {3.5F, -4.5F,  1.0F} };
    /* gold standard used by the SQUAREDERROR loss */
    DTYPE goldData[2][3] = { {1.0F, 1.0F, 1.0F},
                             {1.0F, 1.0F, 1.0F} };
    /* expected forward output: x clamped to [-1, 1] */
    DTYPE yAnswer[2][3] = { {0.5F, -1.0F, 1.0F},
                            {1.0F, -1.0F, 1.0F} };
    /* expected dE/dy under SQUAREDERROR */
    DTYPE dedyAnswer[2][3] = { {-0.5F, -2.0F, 0.0F},
                               { 0.0F, -2.0F, 0.0F} };
    /* expected dE/dx: dE/dy where -1 <= x <= 1, zero elsewhere */
    DTYPE dedxAnswer[2][3] = { {-0.5F, -2.0F,  0.0F},
                               { 0.0F,  0.0F, -0.0F} };

    /* CPU test (this case exercises only the GPU float16 path,
       so the CPU part is trivially true) */
    bool cpuTest = true;

#ifdef USE_CUDA
    /* GPU test */
    bool gpuTest = true;

    /* create float tensors on device 0 */
    XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor * goldGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor * dedyGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor * dedxGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);

    /* create float16 tensors */
    XTensor xHalfGPU;
    XTensor yHalfGPU;
    XTensor goldHalfGPU;
    XTensor dedyHalfGPU;
    XTensor dedxHalfGPU;

    /* initialize variables */
    xGPU->SetData(xData, unitNum);
    goldGPU->SetData(goldData, unitNum);
    yGPU->SetZeroAll();
    dedyGPU->SetZeroAll();
    dedxGPU->SetZeroAll();

    /* convert data type from float to float16 */
    xHalfGPU = ConvertDataType(*xGPU, X_FLOAT16);
    yHalfGPU = ConvertDataType(*yGPU, X_FLOAT16);
    goldHalfGPU = ConvertDataType(*goldGPU, X_FLOAT16);
    dedyHalfGPU = ConvertDataType(*dedyGPU, X_FLOAT16);
    dedxHalfGPU = ConvertDataType(*dedxGPU, X_FLOAT16);

    /* call hardtanh function (forward) */
    _HardTanH(&xHalfGPU, &yHalfGPU);

    /* call hardtanh backward function */
    _HardTanHBackward(&goldHalfGPU, &yHalfGPU, &xHalfGPU,
                      &dedyHalfGPU, &dedxHalfGPU, SQUAREDERROR);

    /* convert data type from float16 back to float for checking */
    _ConvertDataType(&yHalfGPU, yGPU);
    _ConvertDataType(&dedyHalfGPU, dedyGPU);
    _ConvertDataType(&dedxHalfGPU, dedxGPU);

    /* check results; 1e-4 tolerance absorbs float16 rounding error */
    gpuTest = yGPU->CheckData(yAnswer, unitNum, 1e-4F) &&
              dedxGPU->CheckData(dedxAnswer, unitNum, 1e-4F) &&
              dedyGPU->CheckData(dedyAnswer, unitNum, 1e-4F);

    /* destroy variables */
    delete xGPU;
    delete yGPU;
    delete goldGPU;
    delete dedxGPU;
    delete dedyGPU;

    return cpuTest && gpuTest;
#else
    return cpuTest;
#endif // USE_CUDA
}
/* other cases */
/*
...
...
@@ -339,6 +441,16 @@ bool TestHardTanH()
else
XPRINT
(
0
,
stdout
,
">> case 3 passed!
\n
"
);
/* case 4 test */
caseFlag
=
TestHardTanH4
();
if
(
!
caseFlag
)
{
returnFlag
=
false
;
XPRINT
(
0
,
stdout
,
">> case 4 failed!
\n
"
);
}
else
XPRINT
(
0
,
stdout
,
">> case 4 passed!
\n
"
);
/* other cases test */
/*
TODO!!
...
...
This diff is collapsed.
Click to expand it.
编写
预览
Markdown
格式
0%
重试
或
添加新文件
添加附件
取消
您添加了
0
人
到此讨论。请谨慎行事。
请先完成此评论的编辑!
取消
请
注册
或者
登录
后发表评论