Emmay / NiuTrans.Tensor
Commit d061d183 authored Mar 29, 2019 by xiaotong
improve the code of the attention model
parent a7223650
Showing 4 changed files with 76 additions and 46 deletions.
source/sample/transformer/T2TAttention.cpp (+66, -42)
source/sample/transformer/T2TAttention.h (+7, -1)
source/sample/transformer/T2TDecoder.cpp (+2, -2)
source/sample/transformer/T2TEncoder.cpp (+1, -1)
source/sample/transformer/T2TAttention.cpp
@@ -101,69 +101,93 @@ make the network
 >> isTraining - indicates whether the model is used for training
 << return - multi-attention result
 */
-XTensor T2TAttention::Make(XTensor &k, XTensor &q, XTensor &v, XTensor &mask, bool isTraining, bool selfatt)
+XTensor T2TAttention::Make(XTensor &k, XTensor &q, XTensor &v, XTensor &mask, bool isTraining)
 {
     XTensor k2;
     XTensor q2;
     XTensor v2;
 
-    if (selfatt) {
-        XTensor con;
-        XList split;
-
-        con = MMul(k, wbig);
-
-        int d1 = con.GetDim(0);
-        int d2 = con.GetDim(1);
-        int d3 = con.GetDim(2) / 3;
-
-        InitTensor3D(&k2, d1, d2, d3, X_FLOAT, devID, mem);
-        InitTensor3D(&q2, d1, d2, d3, X_FLOAT, devID, mem);
-        InitTensor3D(&v2, d1, d2, d3, X_FLOAT, devID, mem);
-
-        split.Add(&q2);
-        split.Add(&k2);
-        split.Add(&v2);
-
-        Split(con, split, 2, 3);
-    }
-    else {
-        /* linear transofmration before self-attention */
-        k2 = MMul(k, wk);
-        q2 = MMul(q, wq);
-        v2 = MMul(v, wv);
-    }
+    /* linear transformation before self-attention */
+    k2 = MMul(k, wk);
+    q2 = MMul(q, wq);
+    v2 = MMul(v, wv);
+
+    return MakeAttention(k2, q2, v2, mask, isTraining);
+}
+
+/*
+make the network given a big tensor that keeps keys, queries and values
+>> kqv - the big tensor
+>> mask - as it is
+>> isTraining - indicates whether the model is used for training
+*/
+XTensor T2TAttention::MakeBig(XTensor &kqv, XTensor &mask, bool isTraining)
+{
+    XTensor k2;
+    XTensor q2;
+    XTensor v2;
+    XTensor kqv2;
+    XList split;
+
+    kqv2 = MMul(kqv, wbig);
+
+    int d1 = kqv2.GetDim(0);
+    int d2 = kqv2.GetDim(1);
+    int d3 = kqv2.GetDim(2) / 3;
+
+    InitTensor3D(&k2, d1, d2, d3, X_FLOAT, devID, mem);
+    InitTensor3D(&q2, d1, d2, d3, X_FLOAT, devID, mem);
+    InitTensor3D(&v2, d1, d2, d3, X_FLOAT, devID, mem);
+
+    split.Add(&q2);
+    split.Add(&k2);
+    split.Add(&v2);
+
+    Split(kqv2, split, 2, 3);
+
+    return MakeAttention(k2, q2, v2, mask, isTraining);
+}
+
+/*
+make the attention network given keys, queries and values (after linear transformation)
+>> k - keys. It might be of size B * L * H
+       where B = batch size, L = sequence length,
+       and H = vector size of each position
+>> q - queries
+>> v - values
+>> mask - as it is
+>> isTraining - indicates whether the model is used for training
+*/
+XTensor T2TAttention::MakeAttention(XTensor &k, XTensor &q, XTensor &v, XTensor &mask, bool isTraining)
+{
     XTensor kheads;
     XTensor qheads;
     XTensor vheads;
 
     /* multi head */
-    kheads = Split(k2, k2.order - 1, nhead);
-    qheads = Split(q2, q2.order - 1, nhead);
-    vheads = Split(v2, v2.order - 1, nhead);
+    kheads = Split(k, k.order - 1, nhead);
+    qheads = Split(q, q.order - 1, nhead);
+    vheads = Split(v, v.order - 1, nhead);
 
     XTensor att;
     XTensor dot;
     XTensor scalar;
 
     /* scalar = softmax(Q * K^T / sqrt(dk)) * V */
     dot = BMMul(qheads, X_NOTRANS, kheads, X_TRANS);
 
     if (isMasked)
         dot = dot + mask;
 
     dot = Linear(dot, 1.0F / (float)sqrt((float)dk / nhead));
 
     scalar = Softmax(dot, -1);
 
     if (isTraining && dropoutP > 0)
         scalar = Dropout(scalar, dropoutP);
 
     att = BMMul(scalar, vheads);
 
     /* concatenate the heads */
     return MMul(Merge(att, att.order - 1), wa);
 }
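For reference, the body of MakeAttention, unchanged here beyond the renaming of k2/q2/v2 to k/q/v, is standard multi-head scaled dot-product attention. Since dk in this code is the total key dimension across all heads, the factor applied by Linear is 1.0F / sqrt(dk / nhead), i.e. each head of width d_k / h is scaled by its own dimension:

    \mathrm{head}_i = \mathrm{softmax}\!\left(\frac{Q_i K_i^{\top}}{\sqrt{d_k / h}} + M\right) V_i, \qquad h = \mathrm{nhead}

where M is the additive mask (applied only when isMasked is set), dropout is applied to the softmax output during training, and the h head outputs are merged and projected by wa.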
source/sample/transformer/T2TAttention.h
@@ -97,7 +97,13 @@ public:
                  int myDevID = -1, XMem *myMem = NULL);
 
     /* make the network */
-    XTensor Make(XTensor &k, XTensor &q, XTensor &v, XTensor &mask, bool isTraining, bool selfatt);
+    XTensor Make(XTensor &k, XTensor &q, XTensor &v, XTensor &mask, bool isTraining);
+
+    /* make the network given a big tensor that keeps keys, queries and values */
+    XTensor MakeBig(XTensor &kqv, XTensor &mask, bool isTraining);
+
+    /* make the attention network given keys, queries and values (after linear transformation) */
+    XTensor MakeAttention(XTensor &k, XTensor &q, XTensor &v, XTensor &mask, bool isTraining);
 };
 }
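MakeBig replaces three projection matmuls with a single product against wbig followed by a Split of the last dimension into thirds. A minimal standalone sketch of why this is equivalent (plain C++ with hypothetical sizes and weight values; it deliberately avoids the XTensor API, and the column-block layout [Wq | Wk | Wv] is inferred from the q2/k2/v2 ordering of split.Add in MakeBig):

    #include <cstdio>

    int main()
    {
        const int L = 2, H = 3;   /* hypothetical sequence length, hidden size */

        /* input x and a fused weight whose column blocks play the roles of Wq, Wk, Wv */
        float x[L][H] = { {1, 2, 3}, {4, 5, 6} };
        float wbig[H][3 * H];
        for (int i = 0; i < H; i++)
            for (int j = 0; j < 3 * H; j++)
                wbig[i][j] = 0.1f * (i + 1) + 0.01f * j;

        /* one big matmul: out = x * wbig, of shape L x 3H */
        float out[L][3 * H] = { {0} };
        for (int i = 0; i < L; i++)
            for (int j = 0; j < 3 * H; j++)
                for (int t = 0; t < H; t++)
                    out[i][j] += x[i][t] * wbig[t][j];

        /* slicing the column blocks [0,H), [H,2H), [2H,3H) recovers exactly the
           three separate products; check one entry of the "q" block by hand */
        float q00 = 0;
        for (int t = 0; t < H; t++)
            q00 += x[0][t] * wbig[t][0];
        printf("fused: %g, separate: %g\n", out[0][0], q00);
        return 0;
    }

One matmul against a three-times-wider weight keeps the device busier than three narrow ones, which is presumably the motivation for the MakeBig path.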
source/sample/transformer/T2TDecoder.cpp
@@ -119,7 +119,7 @@ XTensor AttDecoder::Make(XTensor &inputDec, XTensor &outputEnc, XTensor &mask, X
     /******************/
     /* self attention */
-    att = attentions[i].Make(x, x, x, mask, isTraining, true);
+    att = attentions[i].MakeBig(x, mask, isTraining);
 
     /* dropout */
     if (isTraining && dropoutP > 0)
@@ -133,7 +133,7 @@ XTensor AttDecoder::Make(XTensor &inputDec, XTensor &outputEnc, XTensor &mask, X
     /*****************************/
     /* encoder-decoder attention */
-    ende = attentionsEnde[i].Make(outputEnc, x, outputEnc, maskEncDec, isTraining, false);
+    ende = attentionsEnde[i].Make(outputEnc, x, outputEnc, maskEncDec, isTraining);
 
     /* dropout */
     if (isTraining && dropoutP > 0)
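The two decoder hunks show why the two attention types diverge: the fused path rests on the column-block identity

    X \,[\,W_q \mid W_k \mid W_v\,] = [\,X W_q \mid X W_k \mid X W_v\,],

which presupposes a single input X feeding all three projections. Self-attention satisfies this (k = q = v = x), so it moves to MakeBig; encoder-decoder attention, where outputEnc supplies keys and values but the decoder state x supplies queries, has no single X and keeps the unfused Make.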
source/sample/transformer/T2TEncoder.cpp
@@ -114,7 +114,7 @@ XTensor AttEncoder::Make(XTensor &input, XTensor &mask, XTensor &maskEncDec, boo
     XTensor res;
 
     /* self attention */
-    att = attentions[i].Make(x, x, x, mask, isTraining, true);
+    att = attentions[i].MakeBig(x, mask, isTraining);
 
     /* dropout */
     if (isTraining && dropoutP > 0)
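Both the encoder and decoder now route self-attention through MakeBig. As a sanity check on the dimension bookkeeping in the fused path, a small standalone sketch (plain C++; the concrete sizes are assumptions for illustration, not values from this repository, and it does not use the XTensor API):

    #include <cstdio>

    int main()
    {
        /* assumed sizes: batch B, length L, hidden H, nhead heads */
        int B = 8, L = 32, H = 512, nhead = 8;

        int bigDim  = 3 * H;        /* wbig projects H -> 3H                */
        int d3      = bigDim / 3;   /* MakeBig: GetDim(2) / 3, back to H    */
        int perHead = d3 / nhead;   /* width of each head after Split       */

        printf("kqv2     : %d x %d x %d\n", B, L, bigDim);
        printf("q2/k2/v2 : %d x %d x %d\n", B, L, d3);
        printf("per head : width %d, attention map %d x %d\n", perHead, L, L);
        return 0;
    }

In effect the code only requires that the last dimension of the fused tensor be divisible by 3 and that H be divisible by nhead; all other dimensions pass through unchanged.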