Commit f59dfd01 by 孟霞

Merge branch 'mengxia' into 'caorunzhe'

Mengxia

See merge request !476
parents 61d5d075 d1ad458a
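% ------------------------------------------------------------------
% Note: this file concatenates several standalone TikZ figure fragments;
% each fragment relies on the book's main preamble. A minimal preamble
% sketch (an assumption, not part of the original figures) covering what
% the code below uses:
%   \usepackage{tikz,pgfplots}                          % drawing + the axis environment
%   \usetikzlibrary{positioning,fit,shadows}            % "right=... of", fit= boxes, drop shadow
%   \usetikzlibrary{decorations.pathreplacing,arrows}   % decoration={brace}, the -latex' arrow tip
%   \usepackage{pifont}                                 % \ding{51}/\ding{55} check marks
%   \pgfdeclarelayer{background}\pgfsetlayers{background,main}
% The colors ugreen/ublue, the macro \bfnew, the \subfigure command and the
% lengths \wseg/\hseg/\wnode/\hnode (allocated with \newlength) are likewise
% assumed to be provided by the surrounding document; some fragments below
% also define ugreen/ublue locally.
% ------------------------------------------------------------------
% Figure: dependencies among target words y_1..y_6 for (a) autoregressive,
% (b) semi-autoregressive (dashed boxes group words produced together),
% and (c) non-autoregressive decoding.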
\tikzstyle{yy} = [circle,minimum height=1cm,text centered,draw=black]
\begin{tikzpicture}[node distance = 0,scale = 1]
\begin{scope}[xshift=0.2in]
\tikzstyle{every node}=[scale=1]
\node (y1)[yy]{\large$y_1$};
\node (y2)[yy,right of = y1,xshift=1.5cm]{\large$y_2$};
\node (y3)[yy,right of = y2,xshift=1.5cm]{\large$y_3$};
\node (y4)[yy,right of = y3,xshift=1.5cm]{\large$y_4$};
\node (y5)[yy,right of = y4,xshift=1.5cm]{\large$y_5$};
\node (y6)[yy,right of = y5,xshift=1.5cm]{\large$y_6$};
\node [anchor=north,font=\scriptsize] (labela) at ([xshift=1.8em,yshift=-3em]y3.south) {(a) Autoregressive model};
\draw[->,thick] (y1.north) .. controls ([yshift=2em]y1.north) and ([yshift=2em]y3.north).. (y3.north);
\draw[->,thick] (y1.north) .. controls ([yshift=3em]y1.north) and ([yshift=3em]y4.north).. (y4.north);
\draw[->,thick] (y1.south) .. controls ([yshift=-2em]y1.south) and ([yshift=-2em]y5.south).. (y5.south);
\draw[->,thick] (y1.south) .. controls ([yshift=-3em]y1.south) and ([yshift=-3em]y6.south).. (y6.south);
\draw[->,thick] (y2.north) .. controls ([yshift=2em]y2.north) and ([yshift=2em]y4.north).. (y4.north);
\draw[->,thick] (y2.south) .. controls ([yshift=-2em]y2.south) and ([yshift=-2em]y5.south).. (y5.south);
\draw[->,thick] (y2.south) .. controls ([yshift=-3em]y2.south) and ([yshift=-3em]y6.south).. (y6.south);
\draw[->,thick] (y3.south) .. controls ([yshift=-2em]y3.south) and ([yshift=-2em]y5.south).. (y5.south);
\draw[->,thick] (y3.south) .. controls ([yshift=-3em]y3.south) and ([yshift=-3em]y6.south).. (y6.south);
\draw[->,thick] (y4.south) .. controls ([yshift=-2em]y4.south) and ([yshift=-2em]y6.south).. (y6.south);
\draw[->,red,very thick](y1.east)to(y2.west);
\draw[->,red,very thick](y2.east)to(y3.west);
\draw[->,red,very thick](y3.east)to(y4.west);
\draw[->,red,very thick](y4.east)to(y5.west);
\draw[->,red,very thick](y5.east)to(y6.west);
\end{scope}
\begin{scope}[yshift=-1.6in]
\tikzstyle{rec} = [rectangle,minimum width=2.8cm,minimum height=1.5cm,text centered,draw=black,dashed]
\tikzstyle{every node}=[scale=1]
\node (y1)[yy]{\large$y_1$};
\node (y2)[yy,right of = y1,xshift=1.5cm]{\large$y_2$};
\node (y3)[yy,right of = y2,xshift=2cm]{\large$y_3$};
\node (y4)[yy,right of = y3,xshift=1.5cm]{\large$y_4$};
\node (y5)[yy,right of = y4,xshift=2cm]{\large$y_5$};
\node (y6)[yy,right of = y5,xshift=1.5cm]{\large$y_6$};
\node (rec1)[rec,right of = y1,xshift=0.75cm]{};
\node (rec2)[rec,right of = y3,xshift=0.75cm]{};
\node (rec3)[rec,right of = y5,xshift=0.75cm]{};
\node [anchor=north,font=\scriptsize] (labelb) at ([xshift=2.2em,yshift=-1em]y3.south) {(b) Semi-autoregressive model};
\draw[->,red,very thick](rec1.east)to(rec2.west);
\draw[->,red,very thick](rec2.east)to(rec3.west);
\draw[->,thick] (rec1.north) .. controls ([yshift=3em]rec1.north) and ([yshift=3em]rec3.north).. (rec3.north);
\end{scope}
\begin{scope}[xshift=0.3in,yshift=-2.5in]
\tikzstyle{every node}=[scale=1]
\node (y1)[yy]{\large$y_1$};
\node (y2)[yy,right of = y1,xshift=1.5cm]{\large$y_2$};
\node (y3)[yy,right of = y2,xshift=1.5cm]{\large$y_3$};
\node (y4)[yy,right of = y3,xshift=1.5cm]{\large$y_4$};
\node (y5)[yy,right of = y4,xshift=1.5cm]{\large$y_5$};
\node (y6)[yy,right of = y5,xshift=1.5cm]{\large$y_6$};
\node [anchor=north,font=\scriptsize] (labelc) at ([xshift=1.5em,yshift=-0.5em]y3.south) {(c) Non-autoregressive model};
\end{scope}
\end{tikzpicture}
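% Figure: batching for inference. The same sentences (filled bars) plus
% padding (unfilled bars) are fed to the model either as one large batch or
% as two smaller batches; the braces mark decoding time (t) and memory (m).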
\begin{tikzpicture}
\tikzstyle{snode} = [draw,inner sep=1pt,minimum width=3em,minimum height=0.5em,rounded corners=1pt,fill=green!30!white]
\tikzstyle{pnode} = [draw,inner sep=1pt,minimum width=1em,minimum height=0.5em,rounded corners=1pt]
\node [anchor=west,snode] (s1) at (0,0) {\tiny{}};
\node [anchor=north west,snode,minimum width=6.3em] (s2) at ([yshift=-0.3em]s1.south west) {\tiny{}};
\node [anchor=north west,snode,minimum width=2em] (s3) at ([yshift=-0.3em]s2.south west) {\tiny{}};
\node [anchor=north west,snode,minimum width=5.5em] (s4) at ([yshift=-0.3em]s3.south west) {\tiny{}};
\node [anchor=north west,snode,minimum width=5.8em] (s5) at ([yshift=-0.3em]s4.south west) {\tiny{}};
\node [anchor=north west,snode,minimum width=3em] (s6) at ([yshift=-0.3em]s5.south west) {\tiny{}};
\node [anchor=east] (label1) at ([xshift=-0.8em,yshift=0.6em]s1.west) {{Sentences:}};
\node [anchor=west,pnode,minimum width=3em] (p1) at ([xshift=0.3em]s1.east) {\tiny{}};
\node [anchor=west,pnode,minimum width=4em] (p3) at ([xshift=0.3em]s3.east) {\tiny{}};
\node [anchor=west,pnode,minimum width=0.5em] (p4) at ([xshift=0.3em]s4.east) {\tiny{}};
\node [anchor=west,pnode,minimum width=0.2em] (p5) at ([xshift=0.3em]s5.east) {\tiny{}};
\node [anchor=west,pnode,minimum width=3em] (p6) at ([xshift=0.3em]s6.east) {\tiny{}};
\node [rectangle,inner sep=0.5em,rounded corners=2pt,very thick,dotted,draw=ugreen!80] [fit = (s1) (s6) (p1) (p6)] (box0) {};
\node[rectangle,inner sep=0.5em,rounded corners=1pt,draw,fill=blue!15] (model) at ([xshift=4em]box0.east){{Model}};
% big batch
\node [anchor=west,snode] (sbi1) at ([xshift=3em,yshift=6em]model.east) {\tiny{}};
\node [anchor=north west,snode,minimum width=6.3em] (sbi2) at ([yshift=-0.3em]sbi1.south west) {\tiny{}};
\node [anchor=north west,snode,minimum width=2em] (sbi3) at ([yshift=-0.3em]sbi2.south west) {\tiny{}};
\node [anchor=north west,snode,minimum width=5.5em] (sbi4) at ([yshift=-0.3em]sbi3.south west) {\tiny{}};
\node [anchor=north west,snode,minimum width=5.8em] (sbi5) at ([yshift=-0.3em]sbi4.south west) {\tiny{}};
\node [anchor=north west,snode,minimum width=3em] (sbi6) at ([yshift=-0.3em]sbi5.south west) {\tiny{}};
\node [anchor=east] (label1) at ([xshift=-0.8em,yshift=-1em]sbi1.west) {{Large batch}};
\node [anchor=west,pnode,minimum width=3em] (pbi1) at ([xshift=0.3em]sbi1.east) {\tiny{}};
\node [anchor=west,pnode,minimum width=4em] (pbi3) at ([xshift=0.3em]sbi3.east) {\tiny{}};
\node [anchor=west,pnode,minimum width=0.5em] (pbi4) at ([xshift=0.3em]sbi4.east) {\tiny{}};
\node [anchor=west,pnode,minimum width=0.2em] (pbi5) at ([xshift=0.3em]sbi5.east) {\tiny{}};
\node [anchor=west,pnode,minimum width=3em] (pbi6) at ([xshift=0.3em]sbi6.east) {\tiny{}};
\node [rectangle,inner sep=0.5em,rounded corners=2pt,very thick,dotted,draw=ugreen!80] [fit = (sbi1) (sbi6) (pbi1) (pbi6)] (box1) {};
% small batch
\node [anchor=west,snode,minimum width=5.5em] (sma1) at ([xshift=3em,yshift=-3em]model.east) {\tiny{}};
\node [anchor=north west,snode,minimum width=5.8em] (sma2) at ([yshift=-0.3em]sma1.south west) {\tiny{}};
\node [anchor=north west,snode,minimum width=6.3em] (sma3) at ([yshift=-0.3em]sma2.south west) {\tiny{}};
\node [anchor=east] (label1) at ([xshift=-0.8em,yshift=-2em]sma1.west) {{Small batch}};
\node [anchor=west,pnode,minimum width=0.5em] (pma1) at ([xshift=0.3em]sma1.east) {\tiny{}};
\node [anchor=west,pnode,minimum width=0.2em] (pma2) at ([xshift=0.3em]sma2.east) {\tiny{}};
\node [rectangle,inner sep=0.5em,rounded corners=2pt,very thick,dotted,draw=ugreen!80] [fit = (sma1) (sma3) (pma1) (pma2)] (box2) {};
% small batch
\node [anchor=west,snode,minimum width=2em] (sma4) at ([xshift=4em,yshift=0em]sma1.east) {\tiny{}};
\node [anchor=north west,snode,minimum width=3em] (sma5) at ([yshift=-0.3em]sma4.south west) {\tiny{}};
\node [anchor=north west,snode,minimum width=3em] (sma6) at ([yshift=-0.3em]sma5.south west) {\tiny{}};
\node [anchor=west,pnode,minimum width=0.7em] (pma4) at ([xshift=0.3em]sma4.east) {\tiny{}};
\node [rectangle,inner sep=0.5em,rounded corners=2pt,very thick,dotted,draw=ugreen!80] [fit = (sma4) (sma6) (pma4)] (box3) {};
\draw [->,very thick] (box0.east) -- (model.west);
\draw [->,thick] (model.east) .. controls +(east:0.5) and +(west:0.5) .. ([xshift=-1em]box1.west);
\draw [->,thick] (model.east) .. controls +(east:0.5) and +(west:0.5) .. ([xshift=-1em]box2.west);
\draw [->,very thick] (box2.east) -- (box3.west);
%%%%%
\node [] (t10) at ([yshift=1.5em]box1.north) {$t_1$};
\node [] (t11) at ([yshift=1.5em]box2.north) {$t_1$};
\node [] (t2) at ([yshift=1.5em]box3.north) {$t_2$};
\draw [very thick,decorate,decoration={brace}] ([xshift=0em,yshift=0.3em]box1.north west) to node [midway,name=final] {} ([xshift=0em,yshift=0.3em]box1.north east);
\draw [very thick,decorate,decoration={brace}] ([xshift=0em,yshift=0.3em]box2.north west) to node [midway,name=final] {} ([xshift=0em,yshift=0.3em]box2.north east);
\draw [very thick,decorate,decoration={brace}] ([xshift=0em,yshift=0.3em]box3.north west) to node [midway,name=final] {} ([xshift=0em,yshift=0.3em]box3.north east);
\node [] (m1) at ([xshift=1.5em]box1.east) {$m_1$};
\node [] (m2) at ([xshift=1.5em]box3.east) {$m_2$};
\draw [very thick,decorate,decoration={brace}] ([xshift=3pt]box1.north east) to node [midway,name=final] {} ([xshift=3pt]box1.south east);
\draw [very thick,decorate,decoration={brace}] ([xshift=3pt]box3.north east) to node [midway,name=final] {} ([xshift=3pt]box3.south east);
\node [rectangle,inner sep=0.5em,rounded corners=2pt,draw,fill=red!5,font=\scriptsize] at ([yshift=-2em,xshift=10em]sbi1.east) {
\begin{tabular}{l}
$m$: GPU memory \\
$t$: time \\
$m_1>m_2$ \\
$t_1>t_2$
\end{tabular}
};
\end{tikzpicture}
%%%------------------------------------------------------------------------------------------------------------
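% Figure: BLEU as a function of beam size; the axis tick labels are hidden
% and replaced below by the actual beam sizes 1, 2, 3, 5, 10, 30 and 100.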
\begin{tikzpicture}
\scriptsize{
\begin{axis}[
width=8cm,
height=5cm,
yticklabel style={/pgf/number format/.cd,fixed,precision=2},
xticklabel style={color=white},
xlabel={\footnotesize{Beam Size}},ylabel={\footnotesize{BLEU}},
ymin=28.8,ymax=30.4,
xmin=0,xmax=10,
xtick={0,4,6,7,8,9,10},
ytick={28.8,29.0,29.2,29.4,29.6,29.8,30.0,30.2,30.4},
legend style={yshift=-5em,xshift=0em,legend cell align=left,legend plot pos=right}
]
\addplot[purple,mark=star,very thick] coordinates {(0,29.3) (4,29.7) (6,30.05) (7,30.1) (7.6,30.2) (8,30.3) (8.3,30.2) (8.7,30.08) (9,29.98) (9.6,29.6)(10,28.8) };
\end{axis}
}
\node[inner sep=0pt] at (0,-1em) {1};
\node[inner sep=0pt] at (9.15em,-1em) {2};
\node[inner sep=0pt] at (13.75em,-1em) {3};
\node[inner sep=0pt] at (16.05em,-1em) {5};
\node[inner sep=0pt] at (18.35em,-1em) {10};
\node[inner sep=0pt] at (20.65em,-1em) {30};
\node[inner sep=0pt] at (22.9em,-1em) {100};
\end{tikzpicture}
%%%--------------------------------------------------------------------------------------------
%\definecolor{dblue}{cmyk}{0.99998,1,0,0 }
\definecolor{dblue}{cmyk}{1,0,0.9,0}
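% Figure: attention-weight sharing. (a) standard Transformer attention in
% layer n; (b) self-attention that reuses the distribution S^m computed in
% layer m (S^n = S^m); (c) encoder-decoder attention that reuses A^m
% (A^n = A^m). Grayed-out elements mark computations that are skipped.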
\begin{tikzpicture}[decoration=brace]
\begin{scope}
\setlength{\wseg}{1.5cm}
\setlength{\hseg}{0.6cm}
\setlength{\wnode}{2cm}
\setlength{\hnode}{1.2cm}
\tikzstyle{layernode} = [rectangle,draw,thick,densely dotted,inner sep=3pt,rounded corners,minimum width=2.1\wnode,minimum height=2.7\hnode]
\tikzstyle{attnnode} = [rectangle,draw,inner sep=3pt,rounded corners, minimum width=2\wnode,minimum height=2.2\hnode]
\tikzstyle{thinnode} = [rectangle,inner sep=1pt,rounded corners=1pt,minimum size=0.3\hnode,font=\scriptsize]
\tikzstyle{fatnode} = [rectangle,inner sep=1pt,rounded corners=1pt,minimum height=0.3\hnode,minimum width=\wnode,font=\small]
% 0.3\wseg here can be used to determine the distance between two adjacent blocks
\coordinate (layer00) at (0,0);
\foreach \i / \j in {1/0,2/1,3/2,4/3,5/4}
\coordinate (layer0\i) at ([xshift=2.05\wnode+0.3\wseg]layer0\j);
\node[layernode,anchor=north] (layer11) at ([yshift=-\hseg]layer01.south) {};
\node[attnnode,anchor=south] (attn11) at ([yshift=0.1\hnode]layer11.south) {};
\node[anchor=north west,inner sep=4pt,font=\small] () at (attn11.north west) {Attention};
\node[anchor=south,inner sep=0pt] (out11) at ([yshift=0.3\hseg]attn11.north) {$\cdots$};
\node[thinnode,anchor=south west,thick,draw=dblue,text=black] (q11) at ([xshift=0.1\wseg,yshift=0.2\hseg]attn11.south west) {$Q^n$};
\node[thinnode,anchor=south,thick,draw=orange,text=black] (k11) at ([yshift=0.2\hseg]attn11.south) {$K^n$};
\node[thinnode,anchor=south east,thick,draw=purple,text=black] (v11) at ([xshift=-0.1\wseg,yshift=0.2\hseg]attn11.south east) {$V^n$};
\node[fatnode,anchor=south,thick,draw] (s11) at ([xshift=0.5\wseg,yshift=0.8\hseg]q11.north east) {$S^n\!=\!S(Q^n\!\cdot\!K^n)$};
\node[fatnode,anchor=south,thick,draw] (a11) at ([xshift=0.45\wseg,yshift=1.3\hseg+0.6\hnode]k11.north east) {$A^n\!=\!S^n\!\cdot\!V^n$};
\begin{scope}[fill=black!100]
\draw[-latex',thick,draw=black!100] (q11.north) .. controls +(north:0.5\hseg) and +(south:0.8\hseg) .. (s11.south);
\draw[-latex',thick,draw=black!100] (k11.north) .. controls +(north:0.5\hseg) and +(south:0.8\hseg) .. (s11.south);
\end{scope}
\begin{scope}[fill=black!100]
\draw[-latex',thick,draw=black!100] (s11.north) .. controls +(north:0.7\hseg) and +(south:0.8\hseg) ..(a11.south);
\draw[-latex',thick,draw=black!100] (v11.north) .. controls +(north:2.7\hseg) and +(south:0.9\hseg) .. (a11.south);
\end{scope}
\draw[-latex',thick] (a11.north).. controls +(north:0.3\hseg) and +(south:0.7\hseg) ..(out11.south);
\node[layernode,anchor=north] (layer12) at ([yshift=-\hseg]layer02.south) {};
\node[attnnode,anchor=south] (attn12) at ([yshift=0.1\hnode]layer12.south) {};
\node[anchor=north west,inner sep=4pt,font=\small] () at (attn12.north west) {Attention};
\node[anchor=south,inner sep=0pt] (out12) at ([yshift=0.3\hseg]attn12.north) {$\cdots$};
\node[thinnode,anchor=south west,thick,draw=dblue!40,text=black!40] (q12) at ([xshift=0.1\wseg,yshift=0.2\hseg]attn12.south west) {$Q^n$};
\node[thinnode,anchor=south,thick,draw=orange!40,text=black!40] (k12) at ([yshift=0.2\hseg]attn12.south) {$K^n$};
\node[thinnode,anchor=south east,thick,draw=purple,text=black] (v12) at ([xshift=-0.1\wseg,yshift=0.2\hseg]attn12.south east) {$V^n$};
\node[fatnode,anchor=south,thick,densely dashed,draw] (s12) at ([xshift=0.5\wseg,yshift=0.8\hseg]q12.north east) {$S^n\!=\!S^m$};
\node[fatnode,anchor=south,thick,draw] (a12) at ([xshift=0.45\wseg,yshift=1.3\hseg+0.6\hnode]k12.north east) {$A^n\!=\!S^n\!\cdot\!V^n$};
\begin{scope}[fill=black!40]
\draw[-latex',thick,draw=black!40] (q12.north) .. controls +(north:0.5\hseg) and +(south:0.8\hseg) .. (s12.south);
\draw[-latex',thick,draw=black!40] (k12.north) .. controls +(north:0.5\hseg) and +(south:0.8\hseg) .. (s12.south);
\end{scope}
\begin{scope}[fill=black!100]
\draw[-latex',thick,draw=black!100] (s12.north).. controls +(north:0.7\hseg) and +(south:0.8\hseg) .. (a12.south);
\draw[-latex',thick,draw=black!100] (v12.north).. controls +(north:2.7\hseg) and +(south:0.9\hseg) .. (a12.south);
\end{scope}
\draw[-latex',thick] (a12.north).. controls +(north:0.3\hseg) and +(south:0.7\hseg) ..(out12.south);
\node[layernode,anchor=north] (layer13) at ([yshift=-\hseg]layer03.south) {};
\node[attnnode,anchor=south] (attn13) at ([yshift=0.1\hnode]layer13.south) {};
\node[anchor=north west,inner sep=4pt,font=\small] () at (attn13.north west) {Attention};
\node[anchor=south,inner sep=0pt] (out13) at ([yshift=0.3\hseg]attn13.north) {$\cdots$};
\node[thinnode,anchor=south west,thick,draw=dblue!40,text=black!40] (q13) at ([xshift=0.1\wseg,yshift=0.2\hseg]attn13.south west) {$Q^n$};
\node[thinnode,anchor=south,thick,draw=orange!40,text=black!40] (k13) at ([yshift=0.2\hseg]attn13.south) {$K^n$};
\node[thinnode,anchor=south east,thick,draw=purple!40,text=black!40] (v13) at ([xshift=-0.1\wseg,yshift=0.2\hseg]attn13.south east) {$V^n$};
\node[fatnode,anchor=south,thick,draw=black!40,text=black!40] (s13) at ([xshift=0.5\wseg,yshift=0.8\hseg]q13.north east) {$S^n$};
\node[fatnode,anchor=south,thick,densely dashed,draw] (a13) at ([xshift=0.45\wseg,yshift=1.3\hseg+0.6\hnode]k13.north east) {$A^n\!=\!A^m$};
\begin{scope}[fill=black!40]
\draw[-latex',thick,draw=black!40] (q13.north) .. controls +(north:0.5\hseg) and +(south:0.8\hseg) .. (s13.south);
\draw[-latex',thick,draw=black!40] (k13.north) .. controls +(north:0.5\hseg) and +(south:0.8\hseg) .. (s13.south);
\end{scope}
\begin{scope}[fill=black!40]
\draw[-latex',thick,draw=black!40] (s13.north) .. controls +(north:0.7\hseg) and +(south:0.8\hseg) .. (a13.south);
\draw[-latex',thick,draw=black!40] (v13.north) .. controls +(north:2.7\hseg) and +(south:0.9\hseg) .. (a13.south);
\end{scope}
\draw[-latex',thick] (a13.north).. controls +(north:0.3\hseg) and +(south:0.7\hseg) ..(out13.south);
\foreach \i / \j / \k / \q / \s / \t / \v in
{2/1/1/100/100/100/100, 2/2/1/100/100/100/100, 2/3/1/100/100/100/100}
{
\node[layernode,anchor=north] (layer\i\j) at ([yshift=-0.8\hseg]layer\k\j.south) {};
\node[attnnode,anchor=south] (attn\i\j) at ([yshift=0.1\hnode]layer\i\j.south) {};
\node[anchor=north west,inner sep=4pt,font=\small] () at (attn\i\j.north west) {Attention};
\node[anchor=south,inner sep=0pt] (out\i\j) at ([yshift=0.3\hseg]attn\i\j.north) {$\cdots$};
\node[thinnode,anchor=south west,thick,draw=dblue!\q,text=black] (q\i\j) at ([xshift=0.1\wseg,yshift=0.2\hseg]attn\i\j.south west) {$Q^m$};
\node[thinnode,anchor=south,thick,draw=orange!\q,text=black] (k\i\j) at ([yshift=0.2\hseg]attn\i\j.south) {$K^m$};
\node[thinnode,anchor=south east,thick,draw=purple!\s,text=black] (v\i\j) at ([xshift=-0.1\wseg,yshift=0.2\hseg]attn\i\j.south east) {$V^m$};
\node[fatnode,anchor=south,thick,draw=black!\s] (s\i\j) at ([xshift=0.45\wseg,yshift=0.8\hseg]q\i\j.north east) {$S^m\!=\!S(Q^m\!\cdot\!K^m)$};
\node[fatnode,anchor=south,thick,draw=black!80] (a\i\j) at ([xshift=0.45\wseg,yshift=1.3\hseg+0.6\hnode]k\i\j.north east) {$A^m\!=\!S^m\!\cdot\!V^m$};
\begin{scope}[fill=black!\q]
\draw[-latex',thick,draw=black!\t] (q\i\j.north) .. controls +(north:0.5\hseg) and +(south:0.8\hseg) .. (s\i\j.south);
\draw[-latex',thick,draw=black!\t] (k\i\j.north) .. controls +(north:0.5\hseg) and +(south:0.8\hseg) .. (s\i\j.south);
\end{scope}
\begin{scope}[fill=black!\s]
\draw[-latex',thick,draw=black!\v] (s\i\j.north).. controls +(north:0.7\hseg) and +(south:0.8\hseg) ..(a\i\j.south);
\draw[-latex',thick,draw=black!\v] (v\i\j.north).. controls +(north:2.7\hseg) and +(south:0.9\hseg) ..(a\i\j.south);
\end{scope}
\draw[-latex',thick] (a\i\j.north).. controls +(north:0.3\hseg) and +(south:0.7\hseg) ..(out\i\j.south);
}
\draw[-latex',densely dashed,very thick] (s22.west) to [out=120,in=-120] (s12.west);
\draw[-latex',densely dashed,very thick] (a23.east) to [out=60,in=-60] (a13.east);
\foreach \i in {1,2,3}
{
\node[anchor=north west,inner sep=3pt,font=\tiny] () at ([yshift=-0.2em]layer1\i.north west) {Layer $n\!=\!m\!+\!i$};
\node[anchor=north west,inner sep=3pt,font=\tiny] () at ([yshift=-0.2em]layer2\i.north west) {Layer $m$};
\node[anchor=center,inner sep=1pt] (dot1\i) at ([yshift=0.5\hseg]layer1\i.north) {$\cdots$};
\draw[->,thick] (out1\i.north) -- ([yshift=0.1em]dot1\i.south);
\node[anchor=center,inner sep=1pt] (dot2\i) at ([yshift=-0.4\hseg]layer1\i.south) {$\cdots$};
\draw[->,thick] ([yshift=-0.15em]dot2\i.north) -- ([yshift=-0.3em]attn1\i.south);
\draw[->,thick] (out2\i.north) -- ([yshift=0.1em]dot2\i.south);
\node[anchor=center,inner sep=1pt] (dot3\i) at ([yshift=-0.4\hseg]layer2\i.south) {$\cdots$};
\draw[->,thick] ([yshift=-0.15em]dot3\i.north) -- ([yshift=-0.3em]attn2\i.south);
}
\node[anchor=north,align=left,inner sep=1pt,font=\footnotesize] () at (dot31.south) {(a) Standard Transformer Attention};
\node[anchor=north,align=left,inner sep=1pt,font=\footnotesize] () at (dot32.south) {(b) \textsc{San} Self-Attention};
\node[anchor=north,align=left,inner sep=1pt,font=\footnotesize] () at (dot33.south) {(c) \textsc{San} Encoder-Decoder Attention};
\end{scope}
\end{tikzpicture}
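% Figure: three ways of combining systems. (a) hypothesis selection picks
% one of the candidate outputs; (b) prediction fusion merges the models into
% a single model; (c) translation recombination re-decodes over a lattice
% built from the candidate outputs.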
\centering
\hspace*{\fill}
\subfigure[Hypothesis selection]
{
\begin{tikzpicture}[scale=0.5]
\tikzstyle{system} = [rectangle,very thick,minimum width=1cm,font=\tiny];
\tikzstyle{output} = [rectangle,very thick,rounded corners=3pt,minimum width=1cm,align=center,font=\tiny];
\begin{scope}
\node [system,draw=orange,text=orange] (model3) at (0,0) {Model $3$};
\node [system,draw=ugreen,text=ugreen,anchor=south] (model2) at ([yshift=0.3cm]model3.north) {Model $2$};
\node [system,draw=red,text=red,anchor=south] (model1) at ([yshift=0.3cm]model2.north) {Model $1$};
\node [output,draw=orange,text=orange,anchor=west] (output3) at ([xshift=0.5cm]model3.east) {Output $3$};
\node [output,draw=ugreen,text=ugreen,anchor=west] (output2) at ([xshift=0.5cm]model2.east) {Output $2$};
\node [output,draw=red,text=red,anchor=west] (output1) at ([xshift=0.5cm]model1.east) {Output $1$};
\begin{pgfonlayer}{background}
\node [draw,thick,dashed,rounded corners=3pt,inner sep=2pt,fit=(output1) (output2) (output3)] (output) {};
\end{pgfonlayer}
\node [output,draw=ublue,text=ublue,minimum width=1cm,right=1cm of output] (final) {Final\\output};
\draw [->,very thick] (model1) to (output1);
\draw [->,very thick] (model2) to (output2);
\draw [->,very thick] (model3) to (output3);
\draw [->,very thick] (output) to node [above,pos=0.5,font=\tiny] {select} (final);
\end{scope}
\end{tikzpicture}
}
\hfill
\subfigure[Prediction fusion]
{
\begin{tikzpicture}[scale=0.5]
\tikzstyle{system} = [rectangle,very thick,minimum width=1cm,font=\tiny];
\tikzstyle{output} = [rectangle,very thick,rounded corners=3pt,minimum width=1cm,align=center,font=\tiny];
\begin{scope}
\node [system,draw=orange,text=orange] (model3) at (0,0) {Model $3$};
\node [system,draw=ugreen,text=ugreen,anchor=south] (model2) at ([yshift=0.3cm]model3.north) {Model $2$};
\node [system,draw=red,text=red,anchor=south] (model1) at ([yshift=0.3cm]model2.north) {Model $1$};
\begin{pgfonlayer}{background}
\node [draw,thick,dashed,inner sep=2pt,fit=(model3) (model2) (model1)] (ensemble) {};
\end{pgfonlayer}
\node [system,draw=ugreen,text=ugreen,right=1cm of ensemble] (model) {Model};
\node [output,draw=ublue,text=ublue,minimum width=1cm,anchor=west] (final) at ([xshift=0.5cm]model.east) {Final\\output};
\draw [->,very thick] (ensemble) to node [above,pos=0.5,font=\tiny] {fuse} (model);
\draw [->,very thick] (model) to (final);
\end{scope}
\end{tikzpicture}
}
\hspace*{\fill}
\\
\subfigure[Translation recombination]
{
\begin{tikzpicture}[scale=0.5]
\tikzstyle{system} = [rectangle,very thick,minimum width=1cm,font=\tiny];
\tikzstyle{output} = [rectangle,very thick,rounded corners=3pt,minimum width=1cm,align=center,font=\tiny];
\tikzstyle{dot} = [circle,fill=blue!40!white,minimum size=5pt,inner sep=0pt];
\begin{scope}
\node [system,draw=orange,text=orange] (model3) at (0,0) {Model $3$};
\node [system,draw=ugreen,text=ugreen,anchor=south] (model2) at ([yshift=0.3cm]model3.north) {Model $2$};
\node [system,draw=red,text=red,anchor=south] (model1) at ([yshift=0.3cm]model2.north) {Model $1$};
\node [output,draw=orange,text=orange,anchor=west] (output3) at ([xshift=0.5cm]model3.east) {Output $3$};
\node [output,draw=ugreen,text=ugreen,anchor=west] (output2) at ([xshift=0.5cm]model2.east) {Output $2$};
\node [output,draw=red,text=red,anchor=west] (output1) at ([xshift=0.5cm]model1.east) {Output $1$};
\draw [->,very thick] (model1) to (output1);
\draw [->,very thick] (model2) to (output2);
\draw [->,very thick] (model3) to (output3);
\begin{pgfonlayer}{background}
\node [draw,thick,dashed,rounded corners=3pt,inner sep=2pt,fit=(output1) (output2) (output3)] (output) {};
\end{pgfonlayer}
\node [dot,anchor=west] (lattice1) at ([shift={(1.5cm,0.5cm)}]output2.east) {};
\node [dot,anchor=west] (lattice2) at ([shift={(1cm,0)}]lattice1.east) {};
\node [dot,anchor=west] (lattice3) at ([shift={(1cm,0)}]lattice2.east) {};
\node [dot,anchor=west] (lattice4) at ([shift={(1.5cm,-0.5cm)}]output2.east) {};
\node [dot,anchor=west] (lattice5) at ([shift={(1cm,0)}]lattice4.east) {};
\draw [-latex,blue] (lattice1) to [out=30,in=150] (lattice2);
\draw [-latex,blue] (lattice2) to [out=30,in=150] (lattice3);
\draw [-latex,blue] (lattice4) to [out=15,in=-120] (lattice2);
\draw [-latex,blue] (lattice4) to [out=-30,in=-150] (lattice5);
\draw [-latex,blue] (lattice5) to [out=15,in=-120] (lattice3);
\draw [-latex,blue] (lattice5) to [out=-60,in=-90] (lattice3);
\begin{pgfonlayer}{background}
\node [draw=blue,fill=white,drop shadow,thick,rounded corners=3pt,inner sep=5pt,fit=(lattice1) (lattice2) (lattice3) (lattice4) (lattice5),label={[font=\tiny,label distance=0pt]90:Lattice}] (lattice) {};
\end{pgfonlayer}
\draw [->,very thick] (output) to (lattice);
\node [system,draw=purple,text=purple,anchor=west] (model) at ([xshift=5.3cm]output1.east) {Model};
\node [output,draw=ublue,text=ublue,minimum width=1cm,right=1.3cm of lattice] (final) {Final output};
\draw [->,very thick] (model) |- (final);
\draw [->,very thick] (lattice) -- (final);
\end{scope}
\end{tikzpicture}
}
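% Figure: output-layer prediction. (a) the standard method computes the
% softmax over the full target vocabulary; (b) vocabulary selection builds a
% candidate list from the source and crosses out (x) the remaining entries.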
\begin{tikzpicture}
\tikzstyle{layer} = [rectangle,draw,rounded corners=3pt,minimum width=1cm,minimum height=0.5cm,line width=1pt];
\tikzstyle{prob} = [minimum width=0.3cm,rectangle,fill=ugreen!20!white,inner sep=0pt];
\begin{scope}[local bounding box=STANDARD]
\node [] (input1) at (0,0) {$\cdots$};
\node [anchor=south,layer,fill=orange!15!white] (net1) at ([yshift=0.5cm]input1.north) {};
\node [anchor=south,layer,fill=orange!15!white] (out1) at ([yshift=0.5cm]net1.north) {};
\node [anchor=south,prob,minimum height=0.9cm] (prob5) at ([yshift=1.2cm]out1.north) {};
\node [anchor=south east,prob,minimum height=0.1cm] (prob4) at ([xshift=-1pt]prob5.south west) {};
\node [anchor=south east,prob,minimum height=0.2cm] (prob3) at ([xshift=-1pt]prob4.south west) {};
\node [anchor=south east,prob,minimum height=0.5cm] (prob2) at ([xshift=-1pt]prob3.south west) {};
\node [anchor=south east,prob,minimum height=0.4cm] (prob1) at ([xshift=-1pt]prob2.south west) {};
\node [anchor=south west,prob,minimum height=0.6cm] (prob6) at ([xshift=1pt]prob5.south east) {};
\node [anchor=south west,prob,minimum height=0.3cm] (prob7) at ([xshift=1pt]prob6.south east) {};
\node [anchor=south west,prob,minimum height=0.2cm] (prob8) at ([xshift=1pt]prob7.south east) {};
\node [anchor=south west,prob,minimum height=0.1cm] (prob9) at ([xshift=1pt]prob8.south east) {};
\path [fill=blue!20!white,draw=white] (out1.north west) -- (prob1.south west) -- (prob9.south east) -- (out1.north east) -- (out1.north west);
\draw [->,line width=1pt] (input1) to (net1);
\draw [->,line width=1pt] (net1) to (out1);
\node [font=\small] (label1) at ([yshift=0.6cm]out1.north) {Softmax};
\end{scope}
\begin{scope}[local bounding box=SELECTION]
\node [] (input2) at (4.5cm,0) {$\cdots$};
\node [anchor=south,layer,fill=orange!15!white] (net2) at ([yshift=0.5cm]input2.north) {};
\node [anchor=south,layer,fill=orange!15!white] (out2) at ([yshift=0.5cm]net2.north) {};
\node [anchor=south,prob,minimum height=0.9cm] (prob5) at ([yshift=1.2cm]out2.north) {};
\node [anchor=south east,prob,minimum height=0.1cm,opacity=0] (prob4) at ([xshift=-1pt]prob5.south west) {};
\node [text=red,anchor=south,inner sep=1pt] () at (prob4.south) {$\times$};
\node [anchor=south east,prob,minimum height=0.2cm,opacity=0] (prob3) at ([xshift=-1pt]prob4.south west) {};
\node [text=red,anchor=south,inner sep=1pt] () at (prob3.south) {$\times$};
\node [anchor=south east,prob,minimum height=0.5cm] (prob2) at ([xshift=-1pt]prob3.south west) {};
\node [anchor=south east,prob,minimum height=0.4cm] (prob1) at ([xshift=-1pt]prob2.south west) {};
\node [anchor=south west,prob,minimum height=0.6cm,opacity=0] (prob6) at ([xshift=1pt]prob5.south east) {};
\node [text=red,anchor=south,inner sep=1pt] () at (prob6.south) {$\times$};
\node [anchor=south west,prob,minimum height=0.3cm,opacity=0] (prob7) at ([xshift=1pt]prob6.south east) {};
\node [text=red,anchor=south,inner sep=1pt] () at (prob7.south) {$\times$};
\node [anchor=south west,prob,minimum height=0.2cm] (prob8) at ([xshift=1pt]prob7.south east) {};
\node [anchor=south west,prob,minimum height=0.1cm,opacity=0] (prob9) at ([xshift=1pt]prob8.south east) {};
\node [text=red,anchor=south,inner sep=1pt] (plabel9) at (prob9.south) {$\times$};
\path [fill=blue!20!white,draw=white] (out2.north west) -- (prob1.south west) -- (prob9.south east) -- (out2.north east) -- (out2.north west);
\draw [->,line width=1pt] (input2) to (net2);
\draw [->,line width=1pt] (net2) to (out2);
\node [font=\small] (label2) at ([yshift=0.6cm]out2.north) {Softmax};
\node [anchor=west,layer,fill=orange!15!white] (net3) at ([xshift=2cm]net2.east) {};
\node [anchor=north,font=\scriptsize] (input3) at ([yshift=-0.5cm]net3.south) {Source};
\node [anchor=south,layer,align=center,font=\scriptsize,fill=yellow!10!white] (out3) at ([yshift=0.9cm]net3.north) {Candidate\\list};
\draw [->,line width=1pt] (input3) to (net3);
\draw [->,line width=1pt] (net3) to (out3);
\draw [->,line width=1pt] (out3) |- (plabel9.east);
\end{scope}
\node [anchor=north,font=\scriptsize] () at ([yshift=-0.2em]STANDARD.south) {(a) Standard method};
\node [anchor=north,font=\scriptsize] () at ([xshift=1.2em]SELECTION.south) {(b) Vocabulary selection};
\end{tikzpicture}
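% Figure: ensembling by averaging the word distributions of three models,
% P = (1/3)(P_1 + P_2 + P_3).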
\begin{tikzpicture}
\node [anchor=north west] (part1) at (0,0) {\small{$\begin{bmatrix} \textrm{Have} \; 0.5 \\ \textrm{Has} \ \ \; 0.1 \\ . \\ . \\ . \\ . \\ . \end{bmatrix}$}};
\node [anchor=north](p1) at ([yshift=-0.3em]part1.south) {$P_1$};
\node [anchor=west](part2) at ([xshift=0.5em]part1.east){\small{$\begin{bmatrix} \textrm{Have} \; 0.2 \\ \textrm{Has} \ \ \; 0.3 \\ . \\ . \\ . \\ . \\ . \end{bmatrix}$}};
\node [anchor=north](p2) at ([yshift=-0.3em]part2.south) {$P_2$};
\node [anchor=west](part3) at ([xshift=0.5em]part2.east){\small{$\begin{bmatrix} \textrm{Have} \; 0.4 \\ \textrm{Has} \ \ \; 0.3 \\ . \\ . \\ . \\ . \\ . \end{bmatrix}$}};
\node [anchor=north](p3) at ([yshift=-0.3em]part3.south) {$P_3$};
\node [anchor=west](part4) at ([xshift=0.5em]part3.east){\huge{$\Rightarrow$}};
\node [anchor=west](part5) at ([xshift=0.5em]part4.east){\small{$\begin{bmatrix} \textrm{Have} \; 0.37 \\ \textrm{Has} \ \ \; 0.23 \\ . \\ . \\ . \\ . \\ . \end{bmatrix}$}};
\node [anchor=north](p5) at (part5.south) {$P=\sum\limits_{i=1}^{3}{\frac{1}{3}P_{i}}$};
\end{tikzpicture}
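% Figure: (a) fusing the outputs of n different systems vs. (b) fusing n
% outputs produced by a single system.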
\begin{tikzpicture}
\tikzstyle{system} = [rectangle,very thick,minimum width=1.5cm,font=\scriptsize];
\tikzstyle{output} = [rectangle,very thick,rounded corners=3pt,minimum width=1.5cm,align=center,font=\scriptsize];
\begin{scope}[local bounding box=MULTIPLE]
\node [system,draw=orange,text=orange] (engine3) at (0,0) {System $n$};
\node [system,draw=ugreen,text=ugreen,anchor=south] (engine2) at ([yshift=0.6cm]engine3.north) {System $2$};
\node [system,draw=red,text=red,anchor=south] (engine1) at ([yshift=0.3cm]engine2.north) {System $1$};
\node [output,draw=orange,text=orange,anchor=west] (output3) at ([xshift=0.5cm]engine3.east) {Output $n$};
\node [output,draw=ugreen,text=ugreen,anchor=west] (output2) at ([xshift=0.5cm]engine2.east) {Output $2$};
\node [output,draw=red,text=red,anchor=west] (output1) at ([xshift=0.5cm]engine1.east) {Output $1$};
\draw [very thick,decorate,decoration={brace}] ([xshift=3pt]output1.north east) to node [midway,name=final] {} ([xshift=3pt]output3.south east);
\node [output,draw=ublue,text=ublue,minimum width=1cm,right=0pt of final,minimum height=2.5em] () {Final\\output};
\draw [->,very thick] (engine1) to (output1);
\draw [->,very thick] (engine2) to (output2);
\draw [->,very thick] (engine3) to (output3);
\node [] () at ([yshift=0.4cm]output3.north) {$\vdots$};
\end{scope}
\begin{scope}[local bounding box=SINGLE]
\node [output,draw=ugreen,text=ugreen,anchor=west] (output3) at ([xshift=4cm]output3.east) {Output $n$};
\node [output,draw=ugreen,text=ugreen,anchor=west] (output2) at ([xshift=4cm]output2.east) {Output $2$};
\node [output,draw=ugreen,text=ugreen,anchor=west] (output1) at ([xshift=4cm]output1.east) {Output $1$};
\node [system,draw=ugreen,text=ugreen,anchor=east,align=center,inner sep=1.9pt] (engine) at ([xshift=-0.5cm]output2.west) {Single system};
\draw [very thick,decorate,decoration={brace}] ([xshift=3pt]output1.north east) to node [midway,name=final] {} ([xshift=3pt]output3.south east);
\node [output,draw=ublue,text=ublue,minimum width=1cm,right=0pt of final,minimum height=2.5em] () {Final\\output};
\draw [->,very thick] (engine.east) to (output1.west);
\draw [->,very thick] (engine.east) to (output2.west);
\draw [->,very thick] (engine.east) to (output3.west);
\node [] () at ([yshift=0.4cm]output3.north) {$\vdots$};
\end{scope}
\node [align=center,anchor=north,font=\small] () at ([yshift=-0.3cm]MULTIPLE.south) {\footnotesize{(a) Fusing the outputs of multiple systems}};
\node [align=center,anchor=north,font=\small] () at ([yshift=-0.3cm]SINGLE.south) {\footnotesize{(b) Fusing multiple outputs of a single system}};
\end{tikzpicture}
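% Figure: iterative refinement. The encoder predicts the target length; a
% chain of decoders then re-translates repeatedly, each decoder taking the
% previous decoder's output as its input (x' -> y_1 -> y_2 -> ... -> y_N).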
\tikzstyle{er} = [rectangle,minimum width=2.5cm,minimum height=1.5cm,text centered,draw=black]
\begin{tikzpicture}[node distance = 0,scale = 0.75]
\tikzstyle{every node}=[scale=0.75]
\node (encoder)[er,very thick,draw=black!70,fill=ugreen!20]{\Large{Encoder}};
\node (decoder_1)[er,very thick,draw=black!70,right of=encoder,xshift=4cm,fill=red!20]{\Large{Decoder 1}};
\node (decoder_2)[er,very thick,draw=black!70,right of=decoder_1,xshift=4cm,fill=red!20]{\Large{Decoder 2}};
\node (point)[right of=decoder_2,xshift=2.5cm,]{\LARGE{...}};
\node (decoder_3)[er,very thick,draw=black!70,right of=point,xshift=2.5cm,fill=red!20]{\Large{Decoder 3}};
\draw [->,very thick,draw=black!70]([xshift=0.2cm]encoder.east) -- ([xshift=-0.2cm]decoder_1.west);
\draw [->,very thick,draw=black!70]([xshift=0.2cm]decoder_1.east) -- ([xshift=-0.2cm]decoder_2.west);
\draw [->,very thick,draw=black!70]([xshift=0.2cm]decoder_2.east) -- ([xshift=-0.1cm]point.west);
\draw [->,very thick,draw=black!70]([xshift=0.1cm]point.east) -- ([xshift=-0.2cm]decoder_3.west);
\draw [->,very thick,draw=black!70]([xshift=0,yshift=-1cm]encoder.south) -- ([xshift=0,yshift=-0.2cm]encoder.south);
\draw [->,very thick,draw=black!70]([xshift=0,yshift=0.2cm]encoder.north) -- ([xshift=0,yshift=1cm]encoder.north);
\node [below of = encoder,xshift=0cm,yshift=2.2cm]{Predict target length};
\node [below of = encoder,xshift=0cm,yshift=-2.2cm]{\Large$x$};
\draw [->,very thick,draw=black!70]([xshift=0,yshift=-1cm]decoder_1.south) -- ([xshift=0,yshift=-0.2cm]decoder_1.south);
\draw [->,very thick,draw=black!70]([xshift=0,yshift=0.2cm]decoder_1.north) -- ([xshift=0,yshift=1cm]decoder_1.north);
\node [below of = decoder_1,xshift=0cm,yshift=-2.2cm]{\Large$x'$};
\node (line1_1)[below of = decoder_1,xshift=0cm,yshift=2.2cm]{\Large$y_1$};
\draw [->,thick,]([xshift=0,yshift=-1cm]decoder_2.south) -- ([xshift=0,yshift=-0.2cm]decoder_2.south);
\draw [->,very thick,draw=black!70]([xshift=0,yshift=0.2cm]decoder_2.north) -- ([xshift=0,yshift=1cm]decoder_2.north);
\node (line1_2)[below of = decoder_2,xshift=0cm,yshift=-2.2cm]{\Large$y_1$};
\node [below of = decoder_2,xshift=0cm,yshift=2.2cm]{\Large$y_2$};
\draw [->,very thick,draw=black!70]([xshift=0,yshift=-1cm]decoder_3.south) -- ([xshift=0,yshift=-0.2cm]decoder_3.south);
\draw [->,very thick,draw=black!70]([xshift=0,yshift=0.2cm]decoder_3.north) -- ([xshift=0,yshift=1cm]decoder_3.north);
\node (line3_2)[below of = decoder_3,xshift=0cm,yshift=-2.2cm]{\Large$y_{N-1}$};
\node [below of = decoder_3,xshift=0cm,yshift=2.2cm]{\Large$y_N$};
\draw[->,very thick,draw=black!70, out=0, in=180,dotted] (line1_1.east) to (line1_2.west);
\draw[->,very thick,draw=black!70, out=0, in=180,dotted] ([xshift=4cm]line1_1.east) to ([xshift=3cm]line1_2.west);
\draw[->,very thick,draw=black!70, out=0, in=180,dotted] ([xshift=6cm]line1_1.east) to (line3_2.west);
\end{tikzpicture}
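% Figure: the two components of inference: a prediction module
% (RNN/Transformer) that produces the word distribution at the current
% position given the encoded source and the target words generated so far,
% and a search module that uses these distributions to build the output.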
\begin{tikzpicture}
%left
\node [anchor=west,draw=black,very thick,minimum width=6em,minimum height=3.5em,fill=blue!15,align=center,text=black] (part1) at (0,0) {\scriptsize{Prediction module} \\ \tiny{(RNN/Transformer)}};
\node [anchor=south] (text) at ([xshift=0.5em,yshift=-3.5em]part1.south) {\scriptsize{Source sentence (encoded)}};
\node [anchor=east,draw=black,very thick,minimum width=6em,minimum height=3.5em,fill=blue!15,align=center,text=black] (part2) at ([xshift=10em]part1.east) {\scriptsize{Search module}};
\node [anchor=south] (text1) at ([xshift=0.5em,yshift=2.2em]part1.north) {\scriptsize{Target words generated so far}};
\node [anchor=south] (text2) at ([xshift=0.5em,yshift=2.2em]part2.north) {\scriptsize{Predicted word distribution at the current position}};
\draw [->,draw=black, thick] ([yshift=2em]part1.north) -- ([yshift=0.1em]part1.north);
\draw [->,draw=black, thick] ([yshift=-2em]part1.south) -- ([yshift=-0.1em]part1.south);
\draw [->,draw=black, thick] ([yshift=0em]part2.north) -- ([yshift=2em]part2.north);
\draw [->,draw=black,very thick] ([yshift=-0.7em]part1.east) -- ([xshift=-0.05em,yshift=-0.7em]part2.west);
\draw [->,draw=black,very thick,dashed] ([yshift=0.7em]part2.west) -- ([xshift=0.05em,yshift=0.7em]part1.east);
\end{tikzpicture}
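% Figure: mask-based non-autoregressive decoding. The encoder reads
% "[cls] hello , world !" and predicts the target length; the first decoder
% pass is fed all Mask tokens, and a later pass re-masks only part of the
% positions.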
\tikzstyle{er} = [rectangle,minimum width=7cm,minimum height=2.5cm,text centered,draw=black]
\begin{tikzpicture}[node distance = 0,scale = 0.55]
\tikzstyle{every node}=[scale=0.55]
\node (encoder)[er,very thick,minimum width=5cm,draw=black!70,fill=ugreen!20]{\huge{Encoder}};
\node (decoder)[er,very thick,right of=encoder,xshift=7.75cm,draw=black!70,fill=red!20]{\huge{Decoder}};
\node (decoder_1)[er,very thick,right of=decoder,xshift=8.75cm,draw=black!70,fill=red!20]{\huge{Decoder}};
\draw [->,very thick,draw=black!70]([xshift=0.2cm]encoder.east) -- ([xshift=-0.2cm]decoder.west);
\draw [->,very thick,draw=black!70]([xshift=0.2cm]decoder.east) -- ([xshift=-0.2cm]decoder_1.west);
\foreach \x in {-1.8cm,-0.9cm,...,1.9cm}
\draw [->,very thick,draw=black!70]([xshift=\x,yshift=-1cm]encoder.south) -- ([xshift=\x,yshift=-0.2cm]encoder.south);
\node [below of = encoder,xshift=-1.8cm,yshift=-2.95cm,scale=1.2]{[cls]};
\node [below of = encoder,xshift=-0.7cm,yshift=-2.9cm,scale=1.2]{hello};
\node [below of = encoder,xshift=0cm,yshift=-3.05cm,scale=1.2]{,};
\node [below of = encoder,xshift=0.9cm,yshift=-2.9cm,scale=1.2]{world};
\node [below of = encoder,xshift=1.8cm,yshift=-2.9cm,scale=1.2]{!};
\draw [->,very thick,draw=black!70]([xshift=-1.8cm,yshift=0.2cm]encoder.north) -- ([xshift=-1.8cm,yshift=1cm]encoder.north);
\node [below of = encoder,xshift=-1.8cm,yshift=2.9cm,scale=1.5]{length:6};
\foreach \x in {-2.8cm,-1.7cm,...,2.9cm}
{\draw [->,very thick,draw=black!70]([xshift=\x,yshift=-1cm]decoder.south) -- ([xshift=\x,yshift=-0.2cm]decoder.south);
\draw [->,very thick,draw=black!70]([xshift=\x,yshift=0.2cm]decoder.north) -- ([xshift=\x,yshift=1cm]decoder.north);}
\node [below of = decoder,xshift=0cm,yshift=-2.9cm,scale=1.2]{{Mask\ Mask\ Mask\ Mask\ Mask\ Mask}};
\node [below of = decoder,xshift=-2.8cm,yshift=2.9cm,scale=1.6]{};
\node [below of = decoder,xshift=-1.7cm,yshift=2.9cm,scale=1.6]{};
\node [below of = decoder,xshift=-0.6cm,yshift=2.7cm,scale=1.6]{};
\node [below of = decoder,xshift=0.5cm,yshift=2.7cm,scale=1.6]{};
\node [below of = decoder,xshift=1.6cm,yshift=2.9cm,scale=1.6]{};
\node [below of = decoder,xshift=2.7cm,yshift=2.75cm,scale=1.6]{};
\foreach \x in {-2.8cm,-1.7cm,...,2.9cm}
{\draw [->,very thick,draw=black!70]([xshift=\x,yshift=-1cm]decoder_1.south) -- ([xshift=\x,yshift=-0.2cm]decoder_1.south);
\draw [->,very thick,draw=black!70]([xshift=\x,yshift=0.2cm]decoder_1.north) -- ([xshift=\x,yshift=1cm]decoder_1.north);}
\node [below of = decoder_1,xshift=-2.8cm,yshift=2.9cm,scale=1.6]{};
\node [below of = decoder_1,xshift=-1.7cm,yshift=2.9cm,scale=1.6]{};
\node [below of = decoder_1,xshift=-0.6cm,yshift=2.75cm,scale=1.6]{};
\node [below of = decoder_1,xshift=0.5cm,yshift=2.9cm,scale=1.6]{};
\node [below of = decoder_1,xshift=1.6cm,yshift=2.9cm,scale=1.6]{};
\node [below of = decoder_1,xshift=2.7cm,yshift=2.75cm,scale=1.6]{};
\node [below of = decoder_1,xshift=-2.8cm,yshift=-2.9cm,scale=1.2]{Mask};
\node [below of = decoder_1,xshift=-1.7cm,yshift=-2.9cm,scale=1.3]{};
\node [below of = decoder_1,xshift=-0.6cm,yshift=-3cm,scale=1.3]{};
\node [below of = decoder_1,xshift=0.5cm,yshift=-2.9cm,scale=1.2]{Mask};
\node [below of = decoder_1,xshift=1.6cm,yshift=-2.9cm,scale=1.2]{};
\node [below of = decoder_1,xshift=2.7cm,yshift=-3cm,scale=1.3]{};
\end{tikzpicture}
\definecolor{ublue}{rgb}{0.152,0.250,0.545}
\definecolor{ugreen}{rgb}{0,0.5,0}
%%% outline
%-------------------------------------------------------------------------
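% Figure: why independent position-wise predictions can conflict. Each
% decoder position produces its own word distribution; picking words
% independently yields consistent outputs ("thanks a lot", "thanks to you")
% as well as inconsistent ones ("thanks a you", "thanks to lot").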
\begin{tikzpicture}
\tikzstyle{word} = [draw=ugreen!20,minimum size=1.8em, fill=ugreen!40, font=\scriptsize, rounded corners=1pt]
\tikzstyle{tgt} = [minimum height=1.6em,minimum width=5.2em,fill=black!10!yellow!30,font=\footnotesize,drop shadow={shadow xshift=0.15em,shadow yshift=-0.15em,}]
\tikzstyle{p} = [fill=blue!15,minimum width=0.4em,inner sep=0pt]
\node[ rounded corners=3pt, fill=red!20, drop shadow, minimum width=10em,minimum height=4em] (encoder) at (0,0) {Transformer encoder};
\node[anchor=west, rounded corners=3pt, fill=red!20, drop shadow, minimum width=14em,minimum height=4em] (decoder) at ([xshift=1cm]encoder.east) {Transformer decoder};
\node[anchor=north,word] (en1) at ([yshift=-1.3em,xshift=-3em]encoder.south) {};
\node[anchor=north,word] (en2) at ([yshift=-1.3em]encoder.south) {};
\node[anchor=north,word] (en3) at ([yshift=-1.3em,xshift=3em]encoder.south) {};
\node[anchor=north,word] (de1) at ([yshift=-1.3em,xshift=-4em]decoder.south) {1};
\node[anchor=north,word] (de2) at ([yshift=-1.3em]decoder.south) {2};
\node[anchor=north,word] (de3) at ([yshift=-1.3em,xshift=4em]decoder.south) {3};
\node[p,anchor=south, minimum height=0.5em] (w1_1) at ([xshift=-7em,yshift=1.5em]decoder.north){};
\node[p,anchor=south,minimum height=2em] (w1_2) at ([xshift=0.3em]w1_1.south east){};
\node[p,anchor=south,minimum height=0.7em] (w1_3) at ([xshift=0.3em]w1_2.south east){};
\node[p,anchor=south,minimum height=0.6em] (w1_4) at ([xshift=0.3em]w1_3.south east){};
\node[p,anchor=south,minimum height=0.2em] (w1_5) at ([xshift=0.3em]w1_4.south east){};
\node[p,anchor=south,minimum height=0.3em] (w1_6) at ([xshift=0.3em]w1_5.south east){};
\node[p,anchor=south,minimum height=0.6em] (w1_7) at ([xshift=0.3em]w1_6.south east){};
\node[p,anchor=south,minimum height=0.8em] (w1_8) at ([xshift=0.3em]w1_7.south east){};
\node[p,anchor=south, minimum height=0.5em] (w2_1) at ([xshift=-1.8em,yshift=1.5em]decoder.north){};
\node[p,anchor=south,minimum height=2em] (w2_2) at ([xshift=0.3em]w2_1.south east){};
\node[p,anchor=south,minimum height=0.7em] (w2_3) at ([xshift=0.3em]w2_2.south east){};
\node[p,anchor=south,minimum height=0.6em] (w2_4) at ([xshift=0.3em]w2_3.south east){};
\node[p,anchor=south,minimum height=1.9em] (w2_5) at ([xshift=0.3em]w2_4.south east){};
\node[p,anchor=south,minimum height=0.3em] (w2_6) at ([xshift=0.3em]w2_5.south east){};
\node[p,anchor=south,minimum height=0.6em] (w2_7) at ([xshift=0.3em]w2_6.south east){};
\node[p,anchor=south,minimum height=0.8em] (w2_8) at ([xshift=0.3em]w2_7.south east){};
\node[p,anchor=south, minimum height=0.5em] (w3_1) at ([xshift=3.2em,yshift=1.5em]decoder.north){};
\node[p,anchor=south,minimum height=2em] (w3_2) at ([xshift=0.3em]w3_1.south east){};
\node[p,anchor=south,minimum height=0.7em] (w3_3) at ([xshift=0.3em]w3_2.south east){};
\node[p,anchor=south,minimum height=0.6em] (w3_4) at ([xshift=0.3em]w3_3.south east){};
\node[p,anchor=south,minimum height=1.9em] (w3_5) at ([xshift=0.3em]w3_4.south east){};
\node[p,anchor=south,minimum height=0.3em] (w3_6) at ([xshift=0.3em]w3_5.south east){};
\node[p,anchor=south,minimum height=0.6em] (w3_7) at ([xshift=0.3em]w3_6.south east){};
\node[p,anchor=south,minimum height=0.8em] (w3_8) at ([xshift=0.3em]w3_7.south east){};
\node[inner sep=0pt,font=\scriptsize] at ([yshift=0.3em]w1_2.north){thanks};
\node[inner sep=0pt,font=\scriptsize] at ([yshift=0.3em]w2_2.north){a};
\node[inner sep=0pt,font=\scriptsize] at ([yshift=0.3em]w2_5.north){to};
\node[inner sep=0pt,font=\scriptsize] at ([yshift=0.3em]w3_2.north){lot};
\node[inner sep=0pt,font=\scriptsize] at ([yshift=0.3em]w3_5.north){you};
\draw[-latex, very thick, ublue] ([yshift=0.1em]en1.north) -- ([xshift=-3em,yshift=-0.1em]encoder.south);
\draw[-latex, very thick, ublue] ([yshift=0.1em]en2.north) -- ([xshift=0em,yshift=-0.1em]encoder.south);
\draw[-latex, very thick, ublue] ([yshift=0.1em]en3.north) -- ([xshift=3em,yshift=-0.1em]encoder.south);
\draw[-latex, very thick, ublue] ([yshift=0.1em]de1.north) -- ([xshift=-4em,yshift=-0.1em]decoder.south);
\draw[-latex, very thick, ublue] ([yshift=0.1em]de2.north) -- ([xshift=0em,yshift=-0.1em]decoder.south);
\draw[-latex, very thick, ublue] ([yshift=0.1em]de3.north) -- ([xshift=4em,yshift=-0.1em]decoder.south);
\draw[-latex, very thick, ublue] (encoder.east) -- (decoder.west);
\begin{pgfonlayer}{background}
{
\node[inner sep=2pt] [fit =(w1_1)(w1_2)(w1_8)](box1) {};
\node[inner sep=2pt] [fit =(w2_1)(w2_2)(w2_8)] (box2){};
\node[inner sep=2pt] [fit =(w3_1)(w3_2)(w3_8)] (box3){};
}
\end{pgfonlayer}
\draw[-latex, very thick, ublue] ([yshift=-1.2em]box1.south) -- (box1.south);
\draw[-latex, very thick, ublue] ([yshift=-1.2em]box2.south) -- (box2.south);
\draw[-latex, very thick, ublue] ([yshift=-1.2em]box3.south) -- (box3.south);
\node[tgt,anchor=west] (tgt1) at ([xshift=2em]box3.east) {thanks a lot};
\node[tgt,anchor=north](tgt2) at ([yshift=-1em]tgt1.south) {thanks to you};
\node[tgt,anchor=north] (tgt3) at ([yshift=-1em]tgt2.south) {thanks a you};
\node[tgt,anchor=north] (tgt4) at ([yshift=-1em]tgt3.south) {thanks to lot};
\node[text=ugreen] at ([xshift=1em]tgt1.east){\ding{51}};
\node[text=ugreen] at ([xshift=1em]tgt2.east){\ding{51}};
\node[text=red] at ([xshift=1em]tgt3.east){\ding{55}};
\node[text=red] at ([xshift=1em]tgt4.east){\ding{55}};
\end{tikzpicture}
\definecolor{ublue}{rgb}{0.152,0.250,0.545}
\definecolor{ugreen}{rgb}{0,0.5,0}
%%% outline
%-------------------------------------------------------------------------
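% Figure: a non-autoregressive Transformer in which an attention module
% links encoder and decoder, the decoder is fed position indices 1..6, and
% the brace marks the step that predicts the target length and builds the
% position embeddings.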
\begin{tikzpicture}
\tikzstyle{word} = [draw=ugreen!20,minimum size=1.8em, fill=ugreen!40, font=\scriptsize, rounded corners=1pt]
\node[rounded corners=3pt, fill=red!20, drop shadow, minimum width=12em,minimum height=4em] (encoder) at (0,0) {Transformer encoder};
\node[draw=blue!10,anchor=west, rounded corners=2pt, fill=blue!20,minimum width=2.5cm,minimum height=2em] (attention) at ([xshift=0.8cm]encoder.east) {Attention module};
\node[anchor=west, rounded corners=3pt, fill=red!20, drop shadow, minimum width=14em,minimum height=4em] (decoder) at ([xshift=0.8cm]attention.east) {Transformer decoder};
\node[anchor=north,word] (en1) at ([yshift=-1.6em,xshift=-4.8em]encoder.south) {hello};
\node[anchor=north,word] (en2) at ([yshift=-1.6em,xshift=-1.6em]encoder.south) {,};
\node[anchor=north,word] (en3) at ([yshift=-1.6em,xshift=1.6em]encoder.south) {world};
\node[anchor=north,word] (en4) at ([yshift=-1.6em,xshift=4.8em]encoder.south) {!};
\node[anchor=north,word] (de1) at ([yshift=-1.6em,xshift=-5.75em]decoder.south) {1};
\node[anchor=north,word] (de2) at ([yshift=-1.6em,xshift=-3.45em]decoder.south) {2};
\node[anchor=north,word] (de3) at ([yshift=-1.6em,xshift=-1.15em]decoder.south) {3};
\node[anchor=north,word] (de4) at ([yshift=-1.6em,xshift=1.15em]decoder.south) {4};
\node[anchor=north,word] (de5) at ([yshift=-1.6em,xshift=3.45em]decoder.south) {5};
\node[anchor=north,word] (de6) at ([yshift=-1.6em,xshift=5.75em]decoder.south) {6};
\node[anchor=south,word] (out1) at ([yshift=1.6em,xshift=-5.75em]decoder.north) {};
\node[anchor=south,word] (out2) at ([yshift=1.6em,xshift=-3.45em]decoder.north) {};
\node[anchor=south,word] (out3) at ([yshift=1.6em,xshift=-1.15em]decoder.north) {};
\node[anchor=south,word] (out4) at ([yshift=1.6em,xshift=1.15em]decoder.north) {};
\node[anchor=south,word] (out5) at ([yshift=1.6em,xshift=3.45em]decoder.north) {};
\node[anchor=south,word] (out6) at ([yshift=1.6em,xshift=5.75em]decoder.north) {};
\draw[-latex, very thick,ublue] ([yshift=0.1em]en1.north) -- ([xshift=-4.8em,yshift=-0.1em]encoder.south);
\draw[-latex, very thick,ublue] ([yshift=0.1em]en2.north) -- ([xshift=-1.6em,yshift=-0.1em]encoder.south);
\draw[-latex, very thick,ublue] ([yshift=0.1em]en3.north) -- ([xshift=1.6em,yshift=-0.1em]encoder.south);
\draw[-latex, very thick,ublue] ([yshift=0.1em]en4.north) -- ([xshift=4.8em,yshift=-0.1em]encoder.south);
\draw[-latex, very thick,ublue] ([yshift=0.1em]de1.north) -- ([xshift=-5.75em]decoder.south);
\draw[-latex, very thick,ublue] ([yshift=0.1em]de2.north) -- ([xshift=-3.45em]decoder.south);
\draw[-latex, very thick,ublue] ([yshift=0.1em]de3.north) -- ([xshift=-1.15em]decoder.south);
\draw[-latex, very thick,ublue] ([yshift=0.1em]de4.north) -- ([xshift=1.15em]decoder.south);
\draw[-latex, very thick,ublue] ([yshift=0.1em]de5.north) -- ([xshift=3.45em]decoder.south);
\draw[-latex, very thick,ublue] ([yshift=0.1em]de6.north) -- ([xshift=5.75em]decoder.south);
\draw[-latex, very thick,ublue] ([xshift=-5.75em,yshift=0.1em]decoder.north) -- ([yshift=-0.1em]out1.south);
\draw[-latex, very thick,ublue] ([xshift=-3.45em,yshift=0.1em]decoder.north) -- ([yshift=-0.1em]out2.south);
\draw[-latex, very thick,ublue] ([xshift=-1.15em,yshift=0.1em]decoder.north) -- ([yshift=-0.1em]out3.south);
\draw[-latex, very thick,ublue] ([xshift=1.15em,yshift=0.1em]decoder.north) -- ([yshift=-0.1em]out4.south);
\draw[-latex, very thick,ublue] ([xshift=3.45em,yshift=0.1em]decoder.north) -- ([yshift=-0.1em]out5.south);
\draw[-latex, very thick,ublue] ([xshift=5.75em,yshift=0.1em]decoder.north) -- ([yshift=-0.1em]out6.south);
\draw[-latex, very thick, ublue] (encoder.east) -- (attention.west);
\draw[-latex, very thick, ublue] (attention.east) -- (decoder.west);
\draw[decorate,decoration={brace, mirror},ublue, very thick] ([yshift=-0.4em]de1.-135) -- node[font=\scriptsize,text=black,yshift=-1em]{predict target length \& get position embedding}([yshift=-0.4em]de6.-45);
%\begin{pgfonlayer}{background}
%{
%\node[rectangle,draw=ublue, inner sep=0mm] [fit =(original1)(ht1)(mt1)(ht1-4)] {};
%}
%\end{pgfonlayer}
\end{tikzpicture}
\definecolor{ublue}{rgb}{0.152,0.250,0.545}
\definecolor{ugreen}{rgb}{0,0.5,0}
%%% outline
%-------------------------------------------------------------------------
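% Figure: fertility-based non-autoregressive translation. The encoder stack
% feeds a fertility predictor (per-source-word copy counts 1, 1, 2, 0, 1);
% source embeddings are copied accordingly to form the decoder input, and
% the decoder stack with the translation predictor emits "I love my dog ."
% in parallel.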
\begin{tikzpicture}[scale=0.7]
\tikzstyle{every node}=[scale=0.7]
\tikzstyle{emb} = [draw,very thick, minimum height=2.4em,minimum width=3.6em,rounded corners=2pt,font=\footnotesize]
\tikzstyle{attention} = [fill=blue!15,draw,very thick, minimum width=19.6em,minimum height=2.4em, rounded corners=2pt,font=\footnotesize]
\tikzstyle{line} = [line width=1.6pt,->]
\foreach \x in {1,2,3,4,5}
\node[emb,fill=gray!30] (enemb_\x) at (0+4em*\x,0){\small\bfnew{Emb}};
\foreach \x in {1,2,3,4,5}
\node[emb,fill=gray!30] (deemb_\x) at (24em+4em*\x,0){\small\bfnew{Emb}};
\node[attention] (a1) at (12em, 5em) {\small\bfnew{Multi-Head Self-Attention}};
\node[attention] (a2) at (36em, 5em) {\small\bfnew{Multi-Head Self-Attention}};
\node[attention] (a3) at (36em, 9em) {\small\bfnew{Multi-Head Self-Attention}};
\node[attention] (a4) at (36em, 13em) {\small\bfnew{Multi-Head Self-Attention}};
\foreach \x in {1,2,3,4,5}
\node[emb,fill=cyan!35] (enmlp_\x) at (0+4em*\x,9em){\small\bfnew{MLP}};
\foreach \x in {1,2,3,4,5}
\node[emb,fill=cyan!35] (demlp_\x) at (24em+4em*\x,17em){\small\bfnew{MLP}};
\foreach \x in {1,2,3,4,5}
\node[emb,fill=red!25,inner sep=1pt] (ensf_\x) at (0+4em*\x,17em){\small\bfnew{SoftMax}};
\foreach \x in {1,2,3,4,5}
\node[emb,fill=ugreen!25!white,inner sep=1pt, font=\scriptsize] (desf_\x) at (24em+4em*\x,22em){\small\bfnew{SoftMax}};
\node[minimum height=2.4em,minimum width=3.6em] (enout_1) at (0+4em*1,21em){\small\bfnew{1}};
\node[minimum height=2.4em,minimum width=3.6em] (enout_2) at (0+4em*2,21em){\small\bfnew{1}};
\node[minimum height=2.4em,minimum width=3.6em] (enout_3) at (0+4em*3,21em){\small\bfnew{2}};
\node[minimum height=2.4em,minimum width=3.6em] (enout_4) at (0+4em*4,21em){\small\bfnew{0}};
\node[minimum height=2.4em,minimum width=3.6em] (enout_5) at (0+4em*5,21em){\small\bfnew{1}};
\node[minimum height=2.4em,minimum width=3.6em,inner sep=0pt,font=\footnotesize] (deout_1) at (24em+4em*1,26em){\small\bfnew{I}};
\node[minimum height=2.4em,minimum width=3.6em,inner sep=0pt,font=\footnotesize] (deout_2) at (24em+4em*2,26em){\small\bfnew{love}};
\node[minimum height=2.4em,minimum width=3.6em,inner sep=0pt,font=\footnotesize] (deout_3) at (24em+4em*3,26em){\small\bfnew{my}};
\node[minimum height=2.4em,minimum width=3.6em,inner sep=0pt,font=\footnotesize] (deout_4) at (24em+4em*4,26em){\small\bfnew{dog}};
\node[minimum height=2.4em,minimum width=3.6em,inner sep=0pt,font=\footnotesize] (deout_5) at (24em+4em*5,26em){\small\bfnew{.}};
\foreach \x in {1,2,3,4,5}{
\draw[line] (enemb_\x.north) -- ([yshift=2.6em]enemb_\x.north);
\draw[line] ([yshift=4.9em]enemb_\x.north) -- (enmlp_\x.south);
\draw[line] (enmlp_\x.north) -- (ensf_\x.south);
\draw[line,dotted] (ensf_\x.north) -- (enout_\x.south);
\draw[line] (deemb_\x.north) -- ([yshift=2.6em]deemb_\x.north);
\draw[line] ([yshift=4.9em]deemb_\x.north) -- ([yshift=6.5em]deemb_\x.north);
\draw[line] ([yshift=8.8em]deemb_\x.north) -- ([yshift=10.4em]deemb_\x.north);
\draw[line] ([yshift=12.9em]deemb_\x.north) -- (demlp_\x.south);
\draw[line] (demlp_\x.north) -- (desf_\x.south);
\draw[line,dotted] (desf_\x.north) -- (deout_\x.south);
}
\begin{pgfonlayer}{background}
{
\node[draw=blue!25,very thick,fill=blue!5,rounded corners=6pt,minimum height=12em,minimum width=21em,yshift=-1em] at (12em,9em) (box1){};
\node[draw=red!25,very thick,fill=red!10,rounded corners=6pt,minimum height=7.4em,minimum width=21em,yshift=-1em] at (12em,19.2em) (box2){};
\node[draw=cyan!25,very thick,fill=cyan!15,rounded corners=6pt,minimum height=17.5em,minimum width=22.6em,yshift=-1em] at (36em,11.8em) (box3){};
\node[draw=ugreen!25,very thick,fill=green!15!white,rounded corners=6pt,minimum height=7em,minimum width=22.6em,yshift=-1em] at (36em,24.5em) (box4){};
}
\draw[line] (enmlp_1.north) .. controls ([yshift=3.6em]enmlp_1.north) and ([xshift=-2em]a4.west) .. (a4.west);
\draw[line] (enmlp_2.north) .. controls ([yshift=3.6em]enmlp_2.north) and ([xshift=-2em]a4.west) .. (a4.west);
\draw[line] (enmlp_3.north) .. controls ([yshift=3.6em]enmlp_3.north) and ([xshift=-2em]a4.west) .. (a4.west);
\draw[line] (enmlp_4.north) .. controls ([yshift=3.6em]enmlp_4.north) and ([xshift=-2em]a4.west) .. (a4.west);
\draw[line] (enmlp_5.north) .. controls ([yshift=3.6em]enmlp_5.north) and ([xshift=-2em]a4.west) .. (a4.west);
\end{pgfonlayer}
\node[anchor=east,draw,circle,minimum size=2em,thick,inner sep=0pt,fill=gray!40] at ([xshift=-2em]enemb_1.west) (pos1){\Large$\sim$};
\node[anchor=west,draw,circle,minimum size=2em,thick,inner sep=0pt,fill=gray!40] at ([xshift=2.4em]deemb_5.east) (pos2){\Large$\sim$};
\node[anchor=west,draw,circle,minimum size=2em,thick,inner sep=0pt,fill=gray!40] at ([xshift=2.4em]a3.east) (pos3){\Large$\sim$};
\draw[line] (pos1.0) -- (enemb_1.180);
\draw[line] (pos2.180) -- (deemb_5.0);
\draw[line] (pos3.180) -- (a3.0);
\draw[violet,dotted,rounded corners=4pt,very thick,->] (box2.north) -- ([yshift=2em]box2.north) -- ([yshift=2em,xshift=11.5em]box2.north) -- ([xshift=1.9em]enemb_5.east) -- (deemb_1.west);
\draw[violet,dotted,very thick,->] (enemb_5.east) -- node[below,violet,font=\footnotesize]{\emph{Copy}}(deemb_1.west);
\node[] at ([xshift=-1em,yshift=-4em]box1.west){{$N \times$}};
\node[] at ([xshift=1em,yshift=-7em]box3.east){{$\times N$}};
\node[font=\footnotesize,align=left] at ([xshift=-2em,yshift=2em]box1.west) {\small\bfnew{Encoder} \\ \small\bfnew{Stack}};
\node[font=\footnotesize,align=left] at ([xshift=-2em]box2.west) {\small\bfnew{Fertility} \\ \small\bfnew{Predictor}};
\node[font=\footnotesize,align=left] at ([xshift=0.4em,yshift=-1.2em]pos3.south) {\small\bfnew{Positional} \\ \small\bfnew{Encoding}};
\node[font=\footnotesize,align=left] at ([xshift=2em,yshift=2em]box3.east) {\small\bfnew{Decoder} \\ \small\bfnew{Stack}};
\node[font=\footnotesize,align=left] at ([xshift=2.5em]box4.east) {\small\bfnew{Translation} \\ \small\bfnew{Predictor}};
\node[font=\footnotesize] at ([yshift=-1em]enemb_1.south) {\small\bfnew{}};
\node[font=\footnotesize] at ([yshift=-1em]enemb_2.south) {\small\bfnew{}};
\node[font=\footnotesize] at ([yshift=-1em]enemb_3.south) {\small\bfnew{我的}};
\node[font=\footnotesize] at ([yshift=-1em]enemb_4.south) {\small\bfnew{}};
\node[font=\footnotesize] at ([yshift=-1em]enemb_5.south) {\small\bfnew{}};
\end{tikzpicture}
\definecolor{Melon}{rgb}{0.99, 0.74, 0.71}
\definecolor{Goldenrod}{rgb}{0.85, 0.65, 0.13}
\definecolor{Cerulean}{rgb}{0, 0.48, 0.65}
\definecolor{Gray}{rgb}{0.5, 0.5, 0.5}
\tikzstyle{emb} = [rectangle,very thick,rounded corners,minimum width=3cm,minimum height=0.85cm,text centered,draw=black!70,fill=Melon!25]
\tikzstyle{sa} = [rectangle,very thick,rounded corners,minimum width=3cm,minimum height=1cm,text centered,draw=black!70,fill=Goldenrod!25]
\tikzstyle{edsa} = [rectangle,very thick,rounded corners,minimum width=3cm,minimum height=1.5cm,text centered,align=center,draw=black!70,fill=Goldenrod!25]
\tikzstyle{an} = [rectangle,very thick,rounded corners,minimum width=3cm,minimum height=0.7cm,text centered,draw=black!70,fill=ugreen!20]
\tikzstyle{ff} = [rectangle,very thick,rounded corners,minimum width=3cm,minimum height=1cm,text centered,align=center,draw=black!70,fill=Cerulean!10]
\tikzstyle{linear} = [rectangle,very thick,rounded corners,minimum width=3cm,minimum height=0.7cm,text centered,draw=black!70,fill=Gray!20]
\tikzstyle{softmax} = [rectangle,very thick,rounded corners,minimum width=3cm,minimum height=0.7cm,text centered,draw=black!70,fill=Melon!40]
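% Figure: Transformer-style blocks. Left: input embedding plus positional
% encoding, self-attention, Add & Norm and feed-forward with residual
% connections. Middle: a parallel stack fed by a parser embedding, with
% self-attention, encoder-decoder attention, Add & Norm, feed-forward and a
% linear output layer.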
\begin{tikzpicture}[node distance = 0,scale = 0.7]
\tikzstyle{every node}=[scale=0.7]
%left
\node(left_Emb)[emb]{\footnotesize{\textbf{Input Embedding}}};
\node(left_cir)[circle,very thick,minimum width=0.5cm,draw=black!70,above of = left_Emb,yshift=1.1cm]{};
\draw[-,very thick,draw=black!70]([xshift=0.03cm]left_cir.west)--([xshift=-0.03cm]left_cir.east);
\draw[-,very thick,draw=black!70]([yshift=-0.03cm]left_cir.north)--([yshift=0.03cm]left_cir.south);
\node(left_cir2)[circle,very thick,minimum width=0.5cm,draw=black!70,left of = left_cir,xshift=-1.5cm]{};
\draw[very thick,draw=black!70]([xshift=0.04cm]left_cir2.west)sin([xshift=0.14cm,yshift=0.08cm]left_cir2.west)cos([xshift=0.25cm]left_cir2.west)sin([xshift=0.36cm,yshift=-0.08cm]left_cir2.west)cos([xshift=-0.03cm]left_cir2.east);
\draw [->,very thick,draw=black!70](left_cir2.east)--(left_cir.west);
\node(left_Self)[sa,above of = left_cir,yshift=1.6cm]{\textbf{Self-attention}};
\node(left_Add_bottom)[an,above of = left_Self,yshift=1.1cm]{\textbf{Add \& Norm}};
\node(left_Feed)[ff,above of = left_Add_bottom,yshift=1.2cm]{\textbf{Feed}\\\textbf{Forward}};
\node(left_Add_top)[an,above of = left_Feed,yshift=1.1cm]{\textbf{Add \& Norm}};
\node(left_text_bottom)[below of = left_Emb,xshift=0cm,yshift=-1.2cm,scale=1]{\small\sffamily\bfseries{我爱我的狗}};
\draw [->,very thick,draw=black!70]([yshift=-0.5cm]left_Emb.south)--(left_Emb.south);
\draw [->,very thick,draw=black!70](left_Emb.north)--(left_cir.south);
\draw [->,very thick,draw=black!70](left_cir.north)--(left_Self.south);
\draw [->,very thick,draw=black!70](left_Self.north)--(left_Add_bottom.south);
\draw [->,very thick,draw=black!70](left_Add_bottom.north)--(left_Feed.south);
\draw [->,very thick,draw=black!70](left_Feed.north)--(left_Add_top.south);
\draw [->,very thick,draw=black!70]([yshift=0.35cm]left_cir.north)--([xshift=-2cm,yshift=0.35cm]left_cir.north)--([xshift=-0.5cm]left_Add_bottom.west)--(left_Add_bottom.west);
\draw [->,very thick,draw=black!70]([yshift=0.1cm]left_Add_bottom.north)--([xshift=-2cm,yshift=0.1cm]left_Add_bottom.north)--([xshift=-0.5cm]left_Add_top.west)--(left_Add_top.west);
\draw[->,very thick,draw=black!70,in=250,out=0] ([yshift=0.5cm]left_cir.north)to([xshift=0.9cm]left_Self.south);
\draw[->,very thick,draw=black!70,in=290,out=180] ([yshift=0.5cm]left_cir.north)to([xshift=-0.9cm]left_Self.south);
%middle
\node(Emb)[emb,right of = left_Emb,xshift=5cm]{\footnotesize{\textbf{Parser Embedding}}};
\node(cir)[circle,very thick,draw=black!70,minimum width=0.5cm,above of = Emb,yshift=1.1cm]{};
\draw[-,very thick,draw=black!70]([xshift=0.03cm]cir.west)--([xshift=-0.03cm]cir.east);
\draw[-,very thick,draw=black!70]([yshift=-0.03cm]cir.north)--([yshift=0.03cm]cir.south);
\node(cir2)[circle,very thick,minimum width=0.5cm,draw=black!70,right of = cir,xshift=1.5cm]{};
\draw[very thick,draw=black!70]([xshift=0.04cm]cir2.west)sin([xshift=0.14cm,yshift=0.08cm]cir2.west)cos([xshift=0.25cm]cir2.west)sin([xshift=0.36cm,yshift=-0.08cm]cir2.west)cos([xshift=-0.03cm]cir2.east);
\node(Self)[sa,above of = cir,yshift=1.6cm]{\textbf{Self-attention}};
\node(Add_bottom)[an,above of = Self,yshift=1.1cm]{\textbf{Add \& Norm}};
\node(ED_Self)[edsa,above of = Add_bottom,yshift=1.8cm]{\textbf{Encoder-Decoder}\\ \textbf{Attention}};
\node(Add_mid)[an,above of = ED_Self,yshift=1.35cm]{\textbf{Add \& Norm}};
\node(Feed)[ff,above of = Add_mid,yshift=1.2cm]{\textbf{Feed}\\ \textbf{Forward}};
\node(Add_top)[an,above of = Feed,yshift=1.1cm]{\textbf{Add \& Norm}};
\node(Linear)[linear,above of = Add_top,yshift=1.3cm]{\textbf{Linear}};
\node(Softmax)[softmax,above of = Linear,yshift=1cm]{\textbf{Softmax}};
\node(text_bottom)[below of = Emb,xshift=0.2cm,yshift=-1.2cm,scale=0.9]{\textbf{VP1\ \ VP3\ \ <eos>}};
\node(text_top)[above of = Softmax,xshift=0.2cm,yshift=1.2cm,scale=0.9]{\textbf{VP1\ \ VP3\ \ <eos>}};
\draw [->,very thick,draw=black!70]([yshift=-0.5cm]Emb.south)--(Emb.south);
\draw [->,very thick,draw=black!70]([xshift=0.9cm,yshift=-0.5cm]Emb.south)--([xshift=0.9cm]Emb.south);
\draw [->,very thick,draw=black!70]([xshift=-0.9cm,yshift=-0.5cm]Emb.south)--([xshift=-0.9cm]Emb.south);
\draw [->,very thick,draw=black!70](Emb.north)--(cir.south);
\draw [->,very thick,draw=black!70](cir.north)--(Self.south);
\draw [->,very thick,draw=black!70](cir2.west)--(cir.east);
\draw[->,very thick,draw=black!70,in=250,out=0] ([yshift=0.5cm]cir.north)to([xshift=0.9cm]Self.south);
\draw[->,very thick,draw=black!70,in=290,out=180] ([yshift=0.5cm]cir.north)to([xshift=-0.9cm]Self.south);
\draw [->,very thick,draw=black!70](Self.north)--(Add_bottom.south);
\draw [->,very thick,draw=black!70](ED_Self.north)--(Add_mid.south);
\draw [->,very thick,draw=black!70](Add_mid.north)--(Feed.south);
\draw [->,very thick,draw=black!70](Feed.north)--(Add_top.south);
\draw [->,very thick,draw=black!70](Add_top.north)--(Linear.south);
\draw [->,very thick,draw=black!70](Linear.north)--(Softmax.south);
\draw [->,very thick,draw=black!70](Softmax.north)--([yshift=0.5cm]Softmax.north);
\draw [->,very thick,draw=black!70]([yshift=0.35cm]cir.north)--([xshift=2cm,yshift=0.35cm]cir.north)--([xshift=0.5cm]Add_bottom.east)--(Add_bottom.east);
\draw [->,very thick,draw=black!70]([yshift=0.1cm]Add_mid.north)--([xshift=2cm,yshift=0.1cm]Add_mid.north)--([xshift=0.5cm]Add_top.east)--(Add_top.east);
\draw [->,very thick,draw=black!70](left_Add_top.north)--([yshift=0.6cm]left_Add_top.north)--([xshift=2.2cm,yshift=0.6cm]left_Add_top.north)--([xshift=2.2cm,yshift=-2cm]left_Add_top.north)--([xshift=5cm,yshift=-2cm]left_Add_top.north)--(ED_Self.south);
\draw [->,very thick,draw=black!70]([xshift=0.9cm,yshift=-0.45cm]ED_Self.south)--([xshift=0.9cm]ED_Self.south);
\draw [->,very thick,draw=black!70]([xshift=-0.9cm,yshift=-0.35cm]ED_Self.south)--([xshift=-0.9cm]ED_Self.south);
\draw [->,very thick,draw=black!70](Add_bottom.north)--([yshift=0.2cm]Add_bottom.north)--([xshift=2cm,yshift=0.2cm]Add_bottom.north)--([xshift=0.5cm]Add_mid.east)--(Add_mid.east);
%right
\node(right_Emb)[emb,right of = Emb,xshift=5.5cm]{\footnotesize{\textbf{Parser Embedding}}};
\node(right_cir)[circle,very thick,minimum width=0.5cm,draw=black!70,above of = right_Emb,yshift=1.1cm]{};
\draw[-,very thick,draw=black!70]([xshift=0.03cm]right_cir.west)--([xshift=-0.03cm]right_cir.east);
\draw[-,very thick,draw=black!70]([yshift=-0.03cm]right_cir.north)--([yshift=0.03cm]right_cir.south);
\node(right_cir2)[circle,very thick,minimum width=0.5cm,draw=black!70,right of = right_cir,xshift=1.5cm]{};
\draw[very thick,draw=black!70]([xshift=0.04cm]right_cir2.west)sin([xshift=0.14cm,yshift=0.08cm]right_cir2.west)cos([xshift=0.25cm]right_cir2.west)sin([xshift=0.36cm,yshift=-0.08cm]right_cir2.west)cos([xshift=-0.03cm]right_cir2.east);
\node(right_Self)[sa,above of = right_cir,yshift=1.6cm]{\textbf{Self-attention}};
\node(right_Add_bottom)[an,above of = right_Self,yshift=1.1cm]{\textbf{Add \& Norm}};
\node(right_ED_Self)[edsa,above of = right_Add_bottom,yshift=1.8cm]{\textbf{Encoder-Decoder}\\\textbf{Attention}};
\node(right_Add_mid)[an,above of = right_ED_Self,yshift=1.35cm]{\textbf{Add \& Norm}};
\node(right_Feed)[ff,above of = right_Add_mid,yshift=1.2cm]{\textbf{Feed}\\\textbf{Forward}};
\node(right_Add_top)[an,above of = right_Feed,yshift=1.1cm]{\textbf{Add \& Norm}};
\node(right_Linear)[linear,above of = right_Add_top,yshift=1.3cm]{\textbf{Linear}};
\node(right_Softmax)[softmax,above of = right_Linear,yshift=1cm]{\textbf{Softmax}};
\node(right_text_bottom)[below of = right_Emb,xshift=1.2cm,yshift=-1.2cm,scale=0.8]{\textbf{VP1 <Mask> VP3 <Mask> <Mask> <Mask>}};
\node(right_text_top)[above of = right_Softmax,xshift=0cm,yshift=1.2cm,scale=0.9]{\textbf{VP1 I VP3 love my dog}};
\draw [->,very thick,draw=black!70]([yshift=-0.5cm]right_Emb.south)--(right_Emb.south);
\draw [->,very thick,draw=black!70](right_Emb.north)--(right_cir.south);
\draw [->,very thick,draw=black!70](right_cir.north)--(right_Self.south);
\draw [->,very thick,draw=black!70](right_cir2.west)--(right_cir.east);
\draw[->,very thick,draw=black!70,in=250,out=0] ([yshift=0.5cm]right_cir.north)to([xshift=0.9cm]right_Self.south);
\draw[->,very thick,draw=black!70,in=290,out=180] ([yshift=0.5cm]right_cir.north)to([xshift=-0.9cm]right_Self.south);
\draw [->,very thick,draw=black!70](right_Self.north)--(right_Add_bottom.south);
\draw [->,very thick,draw=black!70](right_ED_Self.north)--(right_Add_mid.south);
\draw [->,very thick,draw=black!70](right_Add_mid.north)--(right_Feed.south);
\draw [->,very thick,draw=black!70](right_Feed.north)--(right_Add_top.south);
\draw [->,very thick,draw=black!70](right_Add_top.north)--(right_Linear.south);
\draw [->,very thick,draw=black!70](right_Linear.north)--(right_Softmax.south);
\draw [->,very thick,draw=black!70](right_Softmax.north)--([yshift=0.5cm]right_Softmax.north);
\draw [->,very thick,draw=black!70]([yshift=0.35cm]right_cir.north)--([xshift=2cm,yshift=0.35cm]right_cir.north)--([xshift=0.5cm]right_Add_bottom.east)--(right_Add_bottom.east);
\draw [->,very thick,draw=black!70]([yshift=0.1cm]right_Add_mid.north)--([xshift=2cm,yshift=0.1cm]right_Add_mid.north)--([xshift=0.5cm]right_Add_top.east)--(right_Add_top.east);
\draw [->,very thick,draw=black!70]([xshift=0.9cm,yshift=-0.45cm]right_ED_Self.south)--([xshift=0.9cm]right_ED_Self.south);
\draw [->,very thick,draw=black!70]([xshift=-0.9cm,yshift=-0.35cm]right_ED_Self.south)--([xshift=-0.9cm]right_ED_Self.south);
\draw [-,very thick,dashed,draw=black!70]([xshift=2.2cm,yshift=0.6cm]left_Add_top.north)--([xshift=2.2cm,yshift=3.5cm]left_Add_top.north)--([xshift=8cm,yshift=3.5cm]left_Add_top.north)--([xshift=8cm,yshift=-2cm]left_Add_top.north);
\draw [->,very thick,draw=black!70](right_Add_bottom.north)--([yshift=0.2cm]right_Add_bottom.north)--([xshift=2cm,yshift=0.2cm]right_Add_bottom.north)--([xshift=0.5cm]right_Add_mid.east)--(right_Add_mid.east);
\draw [->,very thick,draw=black!70]([xshift=8cm,yshift=-2cm]left_Add_top.north)--([yshift=0.3cm]right_Add_bottom.north)--(right_ED_Self.south);
\draw [->,very thick,draw=black!70](Softmax.east)--([xshift=1.2cm]Softmax.east)--([xshift=1.2cm,yshift=-12.75cm]Softmax.east)--([xshift=2cm,yshift=-12.75cm]Softmax.east);
%module
\node(left_module)[rectangle,very thick,rounded corners,minimum width=4.3cm,minimum height=5.3cm,text centered,draw=black!70,above of = left_Emb,xshift=-0.25cm,yshift=4.1cm]{};
\node(module)[rectangle,very thick,rounded corners,minimum width=4.3cm,minimum height=8.4cm,text centered,draw=black!70,above of = Emb,xshift=0.25cm,yshift=5.65cm]{};
\node(right_module)[rectangle,very thick,rounded corners,minimum width=4.3cm,minimum height=8.4cm,text centered,draw=black!70,above of = right_Emb,xshift=0.25cm,yshift=5.65cm]{};
\node(N)[right of = right_ED_Self,xshift=3cm,scale=1.3]{\textbf{$N\times$}};
\node(left_N)[left of = left_Feed,xshift=-3cm,scale=1.3]{\textbf{$N\times$}};
\node(M)[left of = ED_Self,xshift=-2.3cm,scale=1.3]{\textbf{$M\times$}};
\end{tikzpicture}
% set table width
\newcommand{\PreserveBackslash}[1]{\let\temp=\\#1\let\\=\temp}
\newcolumntype{C}[1]{>{\PreserveBackslash\centering}p{#1}}
% used for heatmap
\newcommand*{\MinNumber}{0}%
\newcommand*{\MaxNumber}{1}%
\newcommand{\ApplyGradient}[1]{%
\pgfmathsetmacro{\PercentColor}{100.0*(#1-\MinNumber)/(\MaxNumber-\MinNumber)}
\hspace{-0.33em}\colorbox{white!\PercentColor!myblack}{}
}
\newcolumntype{Q}{>{\collectcell\ApplyGradient}c<{\endcollectcell}}
\begin{center}
\renewcommand{\arraystretch}{0}
\setlength{\tabcolsep}{5mm}
\setlength{\fboxsep}{2.2mm} % box size
\begin{tabular}{C{.20\textwidth}C{.20\textwidth}C{.20\textwidth}C{.20\textwidth}}
\setlength{\tabcolsep}{0pt}
\subfigure [\footnotesize{自注意力}] {
\begin{tabular}{cc}
\setlength{\tabcolsep}{0pt}
~
&
\begin{tikzpicture}
\begin{scope}
\node [inner sep=1.5pt] (w1) at (0,0) {\small{$1$} };
\foreach \x/\y/\z in {2/1/$2$, 3/2/$3$, 4/3/$4$, 5/4/$5$, 6/5/$6$}
{
\node [inner sep=1.5pt,anchor=south west] (w\x) at ([xshift=1.15em]w\y.south west) {\small{\z} };
}
\end{scope}
\end{tikzpicture}
\\
\renewcommand\arraystretch{1}
\begin{tabular}{c}
\setlength{\tabcolsep}{0pt}
\small{$1\ \ $} \\
\small{$2\ $} \\
\small{$3\ $} \\
\small{$4\ $} \\
\small{$5\ $} \\
\small{$6\ $} \\
\end{tabular}
&
%\setlength{\tabcolsep}{0pt}
\begin{tabular}{*{6}{Q}}
0.0000 & 0.5429 & 0.5138 & 0.4650 & 0.5005 & 0.5531 \\
0.5429 & 0.0000 & 0.0606 & 0.0630 & 0.0703 & 0.0332 \\
0.5138 & 0.0606 & 0.0000 & 0.0671 & 0.0472 & 0.0296 \\
0.4650 & 0.0630 & 0.0671 & 0.0000 & 0.0176 & 0.0552 \\
0.5005 & 0.0703 & 0.0472 & 0.0176 & 0.0000 & 0.0389 \\
0.5531 & 0.0332 & 0.0296 & 0.0552 & 0.0389 & 0.0000 \\
\end{tabular}
\end{tabular}
}
&
\subfigure [\footnotesize{编码-解码注意力}] {
\setlength{\tabcolsep}{0pt}
\begin{tabular}{cc}
\setlength{\tabcolsep}{0pt}
~
&
\begin{tikzpicture}
\begin{scope}
\node [inner sep=1.5pt] (w1) at (0,0) {\small{$1$} };
\foreach \x/\y/\z in {2/1/$2$, 3/2/$3$, 4/3/$4$, 5/4/$5$, 6/5/$6$}
{
\node [inner sep=1.5pt,anchor=south west] (w\x) at ([xshift=1.15em]w\y.south west) {\small{\z} };
}
\end{scope}
\end{tikzpicture}
\\
\renewcommand\arraystretch{1}
\begin{tabular}{c}
\setlength{\tabcolsep}{0pt}
\small{$1\ \ $} \\
\small{$2\ $} \\
\small{$3\ $} \\
\small{$4\ $} \\
\small{$5\ $} \\
\small{$6\ $} \\
\end{tabular}
&
%\setlength{\tabcolsep}{0pt}
\begin{tabular}{*{6}{Q}}
0.0000 & 0.0175 & 0.2239 & 0.3933 & 0.7986 & 0.3603 \\
0.0175 & 0.0000 & 0.1442 & 0.3029 & 0.7295 & 0.3324 \\
0.2239 & 0.1442 & 0.0000 & 0.0971 & 0.6270 & 0.4163 \\
0.3933 & 0.3029 & 0.0971 & 0.0000 & 0.2385 & 0.2022 \\
0.7986 & 0.7295 & 0.6270 & 0.2385 & 0.0000 & 0.0658 \\
0.3603 & 0.3324 & 0.4163 & 0.2022 & 0.0658 & 0.0000 \\
\end{tabular}
\end{tabular}
}
\end{tabular}
\end{center}
\begin{tikzpicture}
\begin{scope}
\node [anchor=north west] (pos1) at (0,0) {$\circ$};
\node [anchor= west] (pos2) at ([xshift=3.0em]pos1.east){$\circ$};
\node [anchor= west] (pos1-2) at ([xshift=1.0em,yshift=1.0em]pos1.east){I};
\draw[->,thick](pos1.east)--(pos2.west);
\node [anchor= west] (pos3) at ([xshift=3.0em]pos2.east){$\circ$};
\node [anchor= west] (pos2-2) at ([xshift=0.1em,yshift=1.0em]pos2.east){have};
\draw[->,thick](pos2.east)--(pos3.west);
\end{scope}
\begin{scope}[yshift=-4.0em]
\node [anchor=north west] (pos1) at (0,0) {$\circ$};
\node [anchor= west] (pos2) at ([xshift=3.0em]pos1.east){$\circ$};
\node [anchor= west] (pos1-2) at ([xshift=0.5em,yshift=1.0em]pos1.east){He};
\draw[->,thick](pos1.east)--(pos2.west);
\node [anchor= west] (pos3) at ([xshift=3.0em]pos2.east){$\circ$};
\node [anchor= west] (pos2-2) at ([xshift=0.1em,yshift=1.0em]pos2.east){has};
\draw[->,thick](pos2.east)--(pos3.west);
\node [anchor= west] (pos4) at ([xshift=5.0em]pos3.east){$\circ$};
\node [anchor= west] (pos5) at ([xshift=5.0em]pos4.east){$\circ$};
\node [anchor= west] (pos6) at ([xshift=5.0em]pos5.east){$\circ$};
\node [anchor= west] (word1) at ([xshift=2.0em,yshift=2.7em]pos4.east){I};
\node [anchor= west] (word2) at ([xshift=1.5em,yshift=-1.6em]pos4.east){He};
\node [anchor= west] (word3) at ([xshift=1.4em,yshift=-3em]pos4.east){She};
\node [anchor= west] (word4) at ([xshift=1.1em,yshift=2.8em]pos5.east){Have};
\node [anchor= west] (word5) at ([xshift=1.3em,yshift=-2.8em]pos5.east){Has};
\begin{pgfonlayer}{background}
{
% I
\draw [->,thick] (pos4.north) .. controls +(north:0.8) and +(north:0.8) .. (pos5.north);
% He
\draw [->,thick] (pos4.south) .. controls +(south:0.8) and +(south:0.8) .. (pos5.south);
% She
\draw [->,thick] (pos4.south) .. controls +(south:1.5) and +(south:1.5) .. (pos5.south);
% Have
\draw [->,thick] (pos5.north) .. controls +(north:0.8) and +(north:0.8) .. (pos6.north);
% Has
\draw [->,thick] (pos5.south) .. controls +(south:0.8) and +(south:0.8) .. (pos6.south);
}
\end{pgfonlayer}
\end{scope}
\begin{scope}[yshift=-8.0em]
\node [anchor=north west] (pos1) at (0,0) {$\circ$};
\node [anchor= west] (pos2) at ([xshift=3.0em]pos1.east){$\circ$};
\node [anchor= west] (pos1-2) at ([xshift=0.4em,yshift=1.0em]pos1.east){She};
\draw[->,thick](pos1.east)--(pos2.west);
\node [anchor= west] (pos3) at ([xshift=3.0em]pos2.east){$\circ$};
\node [anchor= west] (pos2-2) at ([xshift=0.1em,yshift=1.0em]pos2.east){has};
\draw[->,thick](pos2.east)--(pos3.west);
\end{scope}
\end{tikzpicture}
%---------------------------------------------------------------------
%----------------------------------------------------------------------------------------
% CHAPTER 14
%----------------------------------------------------------------------------------------
\chapter{神经机器翻译模型推断}
\parinterval 与训练不同,神经机器翻译的推断要对新的句子进行翻译。由于训练时双语句子对模型是可见的,但是在推断阶段,模型需要根据输入的源语言句子预测译文,因此神经机器翻译的推断和训练过程有着很大的不同。特别是,推断系统往往对应着机器翻译实际部署的需要,因此推断系统的翻译精度和翻译速度等也是同时需要考虑的因素。
\parinterval 本章对神经机器翻译模型推断的若干问题进行讨论。主要涉及三方面内容:1)神经机器翻译的基本问题,如推断方向、译文长度控制等;2)神经机器翻译的推断加速方法,如轻量模型、非自回归模型等;3)多模型集成推断。
%----------------------------------------------------------------------------------------
% NEW SECTION
%----------------------------------------------------------------------------------------
\section{面临的挑战}
\parinterval 神经机器翻译的推断是指:对于输入的源语言句子$\seq{x}$,使用已经训练好的模型找到最佳译文$\hat{\seq{y}}$的过程$\hat{\seq{y}}=\arg\max\limits_{\seq{y}}\funp{P}(\seq{y}|\seq{x})$。这个过程也被称作解码。但是为了避免与神经机器翻译中编码器-解码器造成概念上的混淆,这里统一把翻译新句子的操作称作推断。以上这个过程是一个典型的搜索问题(见{\chaptertwo}),比如,可以使用贪婪搜索或者束搜索完成神经机器翻译的推断(见{\chapterten})。
\parinterval 通用的神经机器翻译推断包括如下几步:
\begin{itemize}
\vspace{0.5em}
\item 对输入的源语言句子进行编码;
\vspace{0.5em}
\item 使用源语言句子的编码结果,在目标语言端自左向右逐词生成译文;
\vspace{0.5em}
\item 在目标语言的每个位置计算模型得分,同时进行剪枝;
\vspace{0.5em}
\item 当满足某种条件时终止搜索。
\vspace{0.5em}
\end{itemize}
\parinterval 这个过程与统计机器翻译中自左向右翻译是一样的(见{\chapterseven}),即,在每一个目标语位置,根据已经生成的译文和源语言的信息,生成下一个译文单词\upcite{Koehn2007Moses,DBLP:conf/amta/Koehn04}。它可以由两个模块实现\upcite{DBLP:conf/emnlp/StahlbergHSB17}:
\begin{itemize}
\vspace{0.5em}
\item 预测模块,也就是根据已经翻译的历史和源语言句子,预测下一个要生成单词的概率\footnote{在统计机器翻译中,翻译的每一步也可以预测短语。在神经机器翻译中也有类似于生成短语的方
法,但是主流的方法还是按单词为单位进行生成。}。因此预测模块实际上就是一个模型打分装置;
\vspace{0.5em}
\item 搜索模块,它会利用预测结果,对当前的翻译假设进行打分,并根据模型得分对翻译假设进行排序和剪枝。
\vspace{0.5em}
\end{itemize}
\parinterval 预测模块是由模型决定的,而搜索模块可以与模型无关。也就是说,不同的模型可以共享同一个搜索模块完成推断。比如,对于基于循环神经网络的模型,预测模块需要读入前一个状态的信息和前一个位置的译文单词,然后预测当前位置单词的概率;对于Transformer,预测模块需要对前面的所有位置做注意力运算,之后预测当前位置的单词概率。不过,这两个模型都可以使用同一个搜索模块。图\ref{fig:14-1}给出了这种架构的示意图。
%----------------------------------------------
\begin{figure}[htp]
\centering
\input{./Chapter14/Figures/figure-main-module}
\caption{神经机器翻译推断系统结构}
\label{fig:14-1}
\end{figure}
%----------------------------------------------
\parinterval 这是一个非常通用的框架,同样适用于基于统计的机器翻译模型。因此,神经机器翻译推断中的很多问题与统计机器翻译是一致的,比如:束搜索的宽度、解码终止条件等等。
\parinterval 一般来说,机器翻译推断系统的设计要考虑三个因素:搜索的准确性、搜索的时延、搜索所需要的存储。通常,准确性是研究人员最关心的问题,比如可以通过增大搜索空间来找到模型得分更高的结果。而搜索的延时和存储消耗是实践中必须要考虑的问题,比如,可以设计合理的搜索终止条件降低延时。
\parinterval 虽然,上述问题在统计机器翻译中都有讨论,但是在神经机器翻译中又面临着新的挑战。
\begin{itemize}
\vspace{0.5em}
\item 搜索的基本问题在神经机器翻译中有着特殊的现象。比如,在统计机器翻译中,降低搜索错误是提升翻译效果的一种手段。但是在神经机器翻译中,简单地降低搜索错误可能无法带来性能的提升,甚至造成翻译品质的下降\upcite{li-etal-2018-simple,Stahlberg2019OnNS};
\vspace{0.5em}
\item 搜索的延时很高,系统实际部署的成本很高。与统计机器翻译系统不同的是,神经机器翻译依赖大量的浮点运算。这也造成神经机器翻译系统的推断会比统计机器翻译系统慢很多。虽然可以使用GPU来加速神经机器翻译的推断速度,但是也大大增加了成本;
\vspace{0.5em}
\item 神经机器翻译优化容易陷入局部最优,单模型的表现并不稳定。由于神经机器翻译优化的目标函数非常不光滑,每次训练得到的模型往往只是一个局部最优解。在新数据上,使用这个局部最优模型进行推断时模型的表现可能不稳定。
\vspace{0.5em}
\end{itemize}
\parinterval 研究者们也针对以上问题开展了大量的研究工作。在\ref{sec:14-2}节中,我们会对神经机器翻译推断中所涉及的一些基本问题进行讨论。虽然这些问题在统计机器翻译中均有涉及,但是在神经机器翻译中却有着不同的现象和解决思路。在\ref{sec:14-3}-\ref{sec:14-5}节中,我们会针对改进神经机器翻译推断效率和多模型融合进行讨论。
%----------------------------------------------------------------------------------------
% NEW SECTION
%----------------------------------------------------------------------------------------
\section{基本问题}\label{sec:14-2}
\parinterval 下面将就神经机器翻译推断中的若干基本问题进行分析。
%----------------------------------------------------------------------------------------
% NEW SUBSUB-SECTION
%----------------------------------------------------------------------------------------
\subsection{推断的方向}
\parinterval 机器翻译有两种常用的推断方式\ \dash \ 自左向右翻译和自右向左翻译。自左向右翻译符合现实世界中人类的语言使用规律,因为人们在翻译一个句子时,总是习惯从句子开始的部分往后生成\footnote{有些语言中,文字是自右向左书写,这时自右向左翻译更符合人类使用这种语言的习惯。}。不过,有时候人也会使用当前单词后面的译文信息。也就是说,翻译也需要“未来” 的文字信息。于是很容易想到使用自右向左的方法对译文进行生成。
\parinterval 以上两种推断方式在神经机器翻译中都有应用,对于源语言句子$\seq{x}=\{x_1,x_2,\dots,x_m\}$和目标语句子$\seq{y}=\{y_1,y_2,\dots,y_n\}$,用自左向右的方式可以将翻译概率$\funp{P}(\seq{y}\vert\seq{x})$描述为公式\eqref{eq:14-1}
\begin{eqnarray}
\funp{P}(\seq{y}\vert\seq{x})=\prod_{j=1}^n \funp{P}(y_j\vert\seq{y}_{<j},\seq{x})
\label{eq:14-1}
\end{eqnarray}
\parinterval 而用自右向左的方式可以得到公式\eqref{eq:14-2}
\begin{eqnarray}
\funp{P}(\seq{y}\vert\seq{x})=\prod_{j=1}^n \funp{P}(y_{n+1-j}\vert\seq{y}_{>n+1-j},\seq{x})
\label{eq:14-2}
\end{eqnarray}
\parinterval 其中,$\seq{y}_{<j}=\{y_1,y_2,\dots,y_{j-1}\}$$\seq{y}_{>j}=\{y_{j+1},y_{j+2},\dots,y_n\}$
\parinterval 可以看到,自左向右推断和自右向左推断本质上是一样的。第十章$\sim$第十二章均使用了自左向右的推断方法。自右向左推断比较简单的实现方式是:直接将训练用双语数据中的目标语句子进行反向,之后仍然使用原始的模型进行训练即可。在推断的时候,生成的目标语词串也需要进行反向得到最终的译文。有时候,使用自右向左的翻译方式会取得更好的效果\upcite{DBLP:conf/wmt/SennrichHB16}。不过更多情况下需要同时使用词串左端(历史)和右端(未来)的信息。有多种思路:
\begin{itemize}
\vspace{0.5em}
\item {\small\sffamily\bfseries{重排序}}\index{重排序}(Reranking)\index{Reranking}。可以用一个基础模型(比如自左向右的模型)得到每个源语言句子的$n$-best翻译结果,之后同时用基础模型的得分和自右向左模型对$n$-best翻译结果进行重排序\upcite{Liu2016AgreementOT,DBLP:conf/wmt/SennrichHB16,DBLP:conf/wmt/LiLXLLLWZXWFCLL19}。也有研究者利用最小贝叶斯风险的方法进行重排序\upcite{Stahlberg2018TheUO}。由于这类方法不会改变基础模型的翻译过程,因此相对“安全”,不会对系统性能造成副作用。
\vspace{0.5em}
\item {\small\sffamily\bfseries{双向推断}}\index{双向推断}(Bidirectional Inference)\index{Bidirectional Inference}。另一种方法是,让自左向右和自右向左模型同步进行,也就是同时考虑译文左侧和右侧的文字信息\upcite{DBLP:conf/aaai/ZhangSQLJW18,Zhou2019SynchronousBN}。例如,可以同时对左边和右边生成的译文进行注意力计算,得到当前位置的单词预测结果。这种方法能够更加充分地融合双向翻译的优势。
\vspace{0.5em}
\item {\small\sffamily\bfseries{多阶段推断}}\index{多阶段推断}(Multi-stage Inference)\index{Multi-stage Inference}。在第一阶段,通过一个基础模型生成一个初步的翻译结果。在第二阶段,同时使用第一阶段生成的翻译结果和源语言句子,进一步生成更好的译文\upcite{Li2017EnhancedNM,ElMaghraby2018EnhancingTF,Geng2018AdaptiveMD}。由于第一阶段的结果已经包含了完整的译文,因此在第二阶段中,系统实际上已经同时使用了整个译文串的两端信息。上述过程可以扩展为迭代式的译文生成方法,配合掩码等技术,可以在生成每个译文单词时,同时考虑左右两端的上下文信息\upcite{Lee2018DeterministicNN,Gu2019LevenshteinT,Guo2020JointlyMS}
\vspace{0.5em}
\end{itemize}
\parinterval 不论是自左向右还是自右向左翻译,本质上都是在对上下文信息进行建模。除了自左向右和自右向左的翻译策略,研究者们也提出了许多新的译文生成策略,比如,{\small\sffamily\bfseries{从中部向外生成}}\index{从中部向外生成}(Middle-Out Decoding)\index{Middle-Out Decoding}、按源语言顺序生成\upcite{Stahlberg2018AnOS}、基于插入的方式生成\upcite{Stern2019InsertionTF,stling2017NeuralMT}等。或者将翻译问题松弛化为一个连续空间模型的优化问题,进而在推断的过程中同时使用译文串左右两端的信息\upcite{Geng2018AdaptiveMD}
\parinterval 最近,以BERT 为代表的预训练语言模型已经证明,一个单词的“历史” 和“未来” 信息对于生成当前单词都是有帮助的\upcite{devlin2019bert}。类似的观点也在神经机器翻译编码器设计中得到验证。比如,在基于循环神经网络的模型中,经常同时使用自左向右和自右向左的方式对源语言句子进行编码。还有,Transformer 编码器会使用整个句子的信息对每一个源语言位置进行表示。因此,在神经机器翻译的解码端采用类似的策略是有其合理性的。
%----------------------------------------------------------------------------------------
% NEW SUBSUB-SECTION
%----------------------------------------------------------------------------------------
\subsection{译文长度控制}
\parinterval 机器翻译推断的一个特点是译文长度需要额外的机制进行控制\upcite{Kikuchi2016ControllingOL,Takase2019PositionalET,Murray2018CorrectingLB,Sountsov2016LengthBI}。这是因为机器翻译在建模时仅考虑了将训练样本(即标准答案)上的损失最小化,但是推断的时候会看到从未见过的样本,而且这些未见样本占据了样本空间的绝大多数。这时,模型会产生偏置,即模型仅仅能够对见过的样本进行准确建模,而对于未见样本的建模并不准确。该问题导致的一个现象是:直接使用训练好的模型会翻译出长度短得离谱的译文。由于神经机器翻译模型使用单词概率的乘积表示整个句子的翻译概率,它天然就倾向生成短译文,因为短译文会使用更少的概率因式相乘。在使用极大似然估计进行模型训练时,这个问题会更加严重,因为模型只关心每个目标语位置的正确预测,对于译文长度没有考虑。译文长度不合理的问题也出现在统计机器翻译模型中,当时的策略是在推断过程中引入译文长度控制机制\upcite{Koehn2007Moses}。神经机器翻译也借用了类似的思想来控制译文长度,有几种常用的方法:
\begin{itemize}
\vspace{0.5em}
\item 长度惩罚因子。用译文长度来归一化翻译概率是最常用的方法:对于源语言句子$\seq{x}$和译文句子$\seq{y}$,模型得分$\textrm{score}(\seq{x},\seq{y})$的值会随着译文$\seq{y}$ 的变长而减小,为了避免此现象,可以引入一个长度惩罚函数$\textrm{lp}(\seq{y})$,并定义模型得分如公式\eqref{eq:14-12}所示:
\begin{eqnarray}
\textrm{score}(\seq{x},\seq{y})=\frac{\log \funp{P}(\seq{y}\vert\seq{x})}{\textrm{lp}(\seq{y})}
\label{eq:14-12}
\end{eqnarray}
通常$\textrm{lp}(\seq{y})$$\vert\seq{y}\vert$的增大而增大,因此这种方式相当于对$\log \funp{P}(\seq{y}\vert\seq{x})$按长度进行归一化\upcite{Jean2015MontrealNM}$\textrm{lp}(\seq{y})$的定义方式很多,比如表\ref{tab:14-1}就列出了一些常用的形式,其中$\alpha$是需要人为设置的参数。
%----------------------------------------------------------------------------------------------------
\begin{table}[htp]
\centering
\caption{长度惩罚因子$\textrm{lp}(\seq{y})$的定义($|\seq{y}|$表示译文长度)}
\label{tab:14-1}
\small
\begin{tabular}{l | l}
\rule{0pt}{15pt} 名称 & $\textrm{lp}(\seq{y})$ \\
\hline
\rule{0pt}{15pt} 句子长度 & $\textrm{lp}(\seq{y}) = {\vert\seq{y}\vert}^{\alpha}$ \\
\rule{0pt}{15pt} GNMT惩罚因子 & $\textrm{lp}(\seq{y}) = \frac{{(5+\vert\seq{y}\vert)}^{\alpha}}{{(5+1)}^{\alpha}}$ \\
\rule{0pt}{15pt} 指数化长度惩罚因子 & $\textrm{lp}(\seq{y}) = \alpha \cdot \log(\vert\seq{y}\vert)$ \\
\end{tabular}
\end{table}
%----------------------------------------------------------------------------------------------------
\vspace{0.5em}
\item 译文长度范围约束。为了让译文的长度落在合理的范围,神经机器翻译的推断也会有一个译文长度约束\upcite{Vaswani2018Tensor2TensorFN,KleinOpenNMT}。令$[a,b]$表示一个长度范围,可以定义:
\begin{eqnarray}
a &=& \omega_{\textrm{low}}\cdot |\seq{x}| \label{eq:14-3}\\
b &=& \omega_{\textrm{high}}\cdot |\seq{x}| \label{eq:14-4}
\end{eqnarray}
\vspace{0.5em}
\noindent 其中,$\omega_{\textrm{low}}$$\omega_{\textrm{high}}$分别是表示译文长度的下限和上限的参数,比如,很多系统中有$\omega_{\textrm{low}}=1/2$$\omega_{\textrm{high}}=2$,表示译文至少有源语言句子一半长,最多有源语言句子两倍长。$\omega_{\textrm{low}}$$\omega_{\textrm{high}}$的设置对推断效率影响很大,$\omega_{\textrm{high}}$可以被看作是一个推断的终止条件,最理想的情况是$\omega_{\textrm{high}} \cdot |\seq{x}|$恰巧就等于最佳译文的长度,这时没有任何计算的浪费。反过来的一种情况,$\omega_{\textrm{high}} \cdot |\seq{x}|$远大于最佳译文的长度,这时很多计算都是无用的。为了找到长度预测的准确率和召回率之间的平衡,一般需要大量的实验最终确定$\omega_{\textrm{low}}$$\omega_{\textrm{high}}$。当然,利用统计模型预测$\omega_{\textrm{low}}$$\omega_{\textrm{high}}$也是非常值得探索的方向,比如基于产出率的模型\upcite{Gu2017NonAutoregressiveNM,Feng2016ImprovingAM}
\vspace{0.5em}
\item 覆盖度模型。译文长度过长或过短的问题,本质上对应着 {\small\sffamily\bfseries{过翻译}}\index{过翻译}(Over Translation)\index{Over Translation}{\small\sffamily\bfseries{欠翻译}}\index{欠翻译}(Under Translation)\index{Under Translation}的问题\upcite{Yang2018OtemUtemOA}。这两种问题出现的原因主要在于:神经机器翻译没有对过翻译和欠翻译建模,即机器翻译覆盖度问题\upcite{TuModeling}。针对此问题,最常用的方法是在推断的过程中引入一个度量覆盖度的模型。比如,使用GNMT 覆盖度模型\upcite{Wu2016GooglesNM},其中翻译模型得分被定义为:
\begin{eqnarray}
\textrm{score}(\seq{x},\seq{y}) &=& \frac{\log \funp{P}(\seq{y} | \seq{x})}{\textrm{lp}(\seq{y})} + \textrm{cp}(\seq{x},\seq{y}) \label {eq:14-5}\\
\textrm{cp}(\seq{x},\seq{y}) &=& \beta \cdot \sum_{i=1}^{|\seq{x}|} \log(\textrm{min} (\sum_{j=1}^{|\seq{y}|} a_{ij} , 1))
\label{eq:14-6}
\end{eqnarray}
\noindent 其中,$\textrm{cp}(\seq{x},\seq{y}) $表示覆盖度模型,它度量了译文对源语言每个单词的覆盖程度。$\textrm{cp}(\seq{x},\seq{y}) $的定义中,$a_{ij}$表示源语言第$i$个位置与目标语第$j$个位置的注意力权重,这样$\sum \limits_{j=1}^{|\seq{y}|} a_{ij}$就可以被当作是源语言第$i$个单词被翻译了“多少”,如果它大于1,表明翻译多了;如果小于1,表明翻译少了。公式\eqref{eq:14-6}会惩罚那些欠翻译的翻译假设。覆盖度模型的一种改进形式是\upcite{li-etal-2018-simple}:
\begin{eqnarray}
\textrm{cp}(\seq{x},\seq{y}) = \sum_{i=1}^{|\seq{x}|} \log( \textrm{max} ( \sum_{j=1}^{|\seq{y}|} a_{ij},\beta))
\label{eq:14-7}
\end{eqnarray}
\noindent 公式\eqref{eq:14-7}将公式\eqref{eq:14-6}中的向下截断方式换为了向上截断。这样,模型可以对过翻译(或重复翻译)有更好的建模能力。不过,这个模型需要在开发集上细致地调整$\beta$,也带来了一定的额外工作量。此外,也可以将这种覆盖度单独建模并进行参数化,与翻译模型一同训练\upcite{Mi2016CoverageEM,TuModeling,Kazimi2017CoverageFC}。这样可以得到更加精细的覆盖度模型。长度惩罚和覆盖度惩罚的具体计算方式可以参考下面给出的示意代码。
\vspace{0.5em}
\end{itemize}
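\parinterval 为了更直观地理解上述长度惩罚和覆盖度惩罚的计算方式,下面给出一段示意性的Python代码(其中的函数名、参数取值和注意力权重均为便于说明而假设的示例,并不对应任何特定系统的真实接口):
{\small
\begin{verbatim}
import math

def length_penalty(length, alpha=0.6):
    # GNMT风格的长度惩罚因子:lp(y) = (5+|y|)^alpha / (5+1)^alpha
    return ((5.0 + length) ** alpha) / ((5.0 + 1.0) ** alpha)

def coverage_penalty(attn, beta=0.2):
    # attn[i][j]:源语言第i个位置对目标语言第j个位置的注意力权重
    # cp(x,y) = beta * sum_i log(min(sum_j a_ij, 1))
    return beta * sum(math.log(min(sum(row), 1.0)) for row in attn)

def rescore(log_prob, length, attn, alpha=0.6, beta=0.2):
    # score(x,y) = log P(y|x) / lp(y) + cp(x,y)
    return log_prob / length_penalty(length, alpha) \
           + coverage_penalty(attn, beta)

def length_range(src_length, w_low=0.5, w_high=2.0):
    # 译文长度范围约束:[w_low*|x|, w_high*|x|]
    return int(w_low * src_length), int(w_high * src_length)

# 一个包含6个单词的翻译假设:整体对数概率为-7.2,
# 注意力权重按源语言位置给出(共3个源语言单词)
attn = [[0.90, 0.05, 0.02, 0.01, 0.01, 0.01],
        [0.05, 0.80, 0.05, 0.05, 0.03, 0.02],
        [0.02, 0.10, 0.40, 0.10, 0.10, 0.08]]
print(rescore(-7.2, 6, attn))   # 长度归一化并加入覆盖度惩罚后的得分
print(length_range(3))          # 源语言长度为3时的译文长度范围
\end{verbatim}
}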
%----------------------------------------------------------------------------------------
% NEW SUBSUB-SECTION
%----------------------------------------------------------------------------------------
\subsection{搜索终止条件}
\parinterval 在机器翻译推断中,何时终止搜索是一个非常基础的问题。如\chaptertwo 所述,系统研发者一方面希望尽可能遍历更大的搜索空间,找到更好的结果,另一方面也希望在尽可能短的时间内得到结果。这时搜索的终止条件就是一个非常关键的指标。在束搜索中有很多终止条件可以使用,比如,在生成一定数量的译文之后就终止搜索,或者当最佳译文与排名第二的译文之间的分数差超过一个阈值时就终止搜索等。
\parinterval 在统计机器翻译中,搜索的终止条件相对容易设计。因为所有的翻译结果都可以用相同步骤的搜索过程生成,比如,在CYK解码中搜索的步骤仅与构建的分析表大小有关。在神经机器翻译中,这个问题要更加复杂。当系统找到一个完整的译文之后,可能还有很多译文没有被生成完,这时就面临着如何决定是否继续搜索的问题。
\parinterval 针对这些问题,研究者们设计了很多新的方法。比如,有研究者提出可以在束搜索中使用启发性信息让搜索尽可能早地停止,同时保证搜索结果是“最优的”\upcite{DBLP:conf/emnlp/HuangZM17}。也有研究者将束搜索建模为优化问题\upcite{Wiseman2016SequencetoSequenceLA,DBLP:conf/emnlp/Yang0M18},进而设计出新的终止条件\upcite{Ma2019LearningTS}。很多开源机器翻译系统也都使用了巧妙的终止条件,比如,在OpenNMT系统中当搜索束中当前最好的假设生成了完整的译文,搜索就会停止\upcite{KleinOpenNMT},在RNNSearch系统中当找到预设数量的译文时搜索就会停止,同时在这个过程中会不断减小搜索束的大小\upcite{bahdanau2014neural}。
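\parinterval 作为一个简单的示意,下面的Python代码片段给出了束搜索终止条件的一种可能写法(假设模型得分为对数概率且没有使用长度归一化,变量名均为说明用的示例):
{\small
\begin{verbatim}
def should_stop(active_scores, finished_scores, step, max_step):
    # active_scores:搜索束中尚未结束的翻译假设的得分(对数概率)
    # finished_scores:已经生成<eos>的完整译文的得分
    if step >= max_step:           # 达到最大解码步数,强制停止
        return True
    if not active_scores:          # 搜索束已空,只能停止
        return True
    if not finished_scores:        # 还没有任何完整译文,继续搜索
        return False
    # 未完成的假设每扩展一步都会累加一个非正的对数概率,得分只会降低,
    # 因此当最好的完整译文已不差于所有未完成假设时,可以停止搜索
    return max(finished_scores) >= max(active_scores)

print(should_stop([-4.1, -4.5], [-3.9], step=12, max_step=100))  # True
\end{verbatim}
}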
\parinterval 实际上,设计搜索终止条件反映了搜索延时和搜索精度之间的一种折中\upcite{Eisner2011LearningST,Jiang2012LearnedPF}。在很多应用中,这个问题会非常关键。比如,在同声传译中,对于输入的长文本,何时开始翻译、何时结束翻译都是十分重要的\upcite{Zheng2020OpportunisticDW,Ma2019STACLST}。在很多线上翻译应用中,翻译结果的响应不能超过一定的时间,这时就需要一种{\small\sffamily\bfseries{时间受限的搜索}}\index{时间受限的搜索}(Time-constrained Search)\index{Time-constrained Search}策略\upcite{DBLP:conf/emnlp/StahlbergHSB17}
%----------------------------------------------------------------------------------------
% NEW SUBSUB-SECTION
%----------------------------------------------------------------------------------------
\subsection{译文多样性}
\parinterval 机器翻译系统的输出并不仅限于单个译文。很多情况下,需要多个译文。比如,译文重排序中通常就需要系统的$n$-best输出,而在交互式机器翻译中也往往需要提供多个译文供用户选择\upcite{Peris2017InteractiveNM,Peris2018ActiveLF}。但是,无论是统计机器翻译还是神经机器翻译,都面临一个同样的问题:$n$-best输出中的译文十分相似。表\ref{tab:14-2}就展示了一个神经机器翻译输出的多个翻译结果,可以看到这些译文的区别很小。这个问题也被看做是机器翻译缺乏{\small\sffamily\bfseries{译文多样性}}\index{译文多样性}(Translation Diversity)\index{Translation Diversity}的问题\upcite{Gimpel2013ASE,Li2016MutualIA,DBLP:conf/emnlp/DuanLXZ09,DBLP:conf/acl/XiaoZZW10,xiao2013bagging}
%----------------------------------------------------------------------
\begin{table}[htp]{
\begin{center}
\caption{中英神经机器翻译系统的3-best结果实例}
{
\begin{tabular}{c|l}
源文 & 那只敏捷的棕色狐狸跳过了那只懒惰的狗。 \\
\hline
\rule{0pt}{10pt} {\small{NO.1}} & The agile brown fox jumped over the lazy dog. \\
\rule{0pt}{10pt} {\small{NO.2}} &The quick brown fox jumped over the lazy dog. \\
\rule{0pt}{10pt} {\small{NO.3}} & The quick brown fox jumps over the lazy dog. \\
\end{tabular}
\label{tab:14-2}
}
\end{center}
}\end{table}
%----------------------------------------------------------------------
\parinterval 机器翻译输出缺乏多样性会带来很多问题。一个直接的问题是在重排序时无法选择到更好的译文,因为所有候选都没有太大的差别。另一方面,当需要利用$n$-best输出来表示翻译假设空间时,缺乏多样性的译文也会使得翻译后验概率的估计不够准确,造成建模的偏差。在一些模型训练方法中,这种后验概率估计的偏差也会造成较大的影响\upcite{DBLP:conf/acl/ShenCHHWSL16}。从人工翻译的角度,同一个源语言句子的译文应该是多样的,因此过于相似的译文也无法反映足够多的翻译现象。
\parinterval 因此增加译文多样性成为了机器翻译研究中一个有价值的方向。在统计机器翻译中就有很多尝试\upcite{DBLP:conf/emnlp/DuanLXZ09,DBLP:conf/acl/XiaoZZW10,xiao2013bagging}。主要思路是通过加入一些“扰动”让翻译模型的行为发生变化,进而得到区别更大的译文。类似的方法也同样适用于神经机器翻译。例如,可以在推断过程中加入额外的模型,用于惩罚出现相似译文的情况\upcite{Li2016ADO,Li2016MutualIA}。也有研究者在翻译模型中引入新的隐含变量或者加入新的干扰,进而控制多样性译文的输出\upcite{He2018SequenceTS,Shen2019MixtureMF,Wu2020GeneratingDT}。类似的,也可以利用模型中局部结构的多样性来生成多样的译文\upcite{Sun2020GeneratingDT}。除了考虑每个译文之间的多样性,也可以对译文进行分组,之后增加不同组之间的多样性\upcite{Vijayakumar2016DiverseBS}
%----------------------------------------------------------------------------------------
% NEW SUBSUB-SECTION
%----------------------------------------------------------------------------------------
\subsection{搜索错误}
\parinterval 机器翻译的错误分为两类:搜索错误和模型错误。搜索错误是指由于搜索算法的限制,即使在潜在的搜索空间中有更好的解,模型也无法找到。比较典型的例子是,在对搜索进行剪枝的时候,如果剪枝过多,找到的结果很有可能不是最优的。这时就出现了搜索错误。
\parinterval 在统计机器翻译中,搜索错误可以通过减少剪枝进行缓解。比较简单的方式是增加束宽度,这往往会带来一定的性能提升\upcite{Xiao2016ALA}。也可以对搜索问题单独建模,以保证学习到的模型出现更少的搜索错误\upcite{Liu2014SearchAwareTF,Yu2013MaxViolationPA}。但是,在神经机器翻译中,这个问题却表现出不同的现象。在很多神经机器翻译系统中,随着搜索束的增大,系统的BLEU不升反降。图\ref{fig:14-3}展示了BLEU随着束大小的变化曲线。这个现象与传统的常识是相违背的,因此也有一些研究尝试解释这个现象\upcite{Stahlberg2019OnNS,Niehues2017AnalyzingNM}。在实验中,研究者也发现增加搜索束的大小会导致翻译生成的结果变得更短。他们将这个现象归因于:增加搜索束的大小,会导致更多的模型错误,因为神经机器翻译的建模是基于局部归一的最大似然估计\upcite{Sountsov2016LengthBI,Murray2018CorrectingLB,StahlbergNeural}。另一方面,也有研究者把这种翻译过短的现象归因于搜索错误\upcite{Stahlberg2019OnNS}。由于搜索时所面临的搜索空间是十分巨大的,因此搜索时可能无法找到模型定义的“最好”的译文。在某种意义上,这也体现了一种训练和推断不一致的问题。
%----------------------------------------------------------------------
\begin{figure}[htp]
\centering
\input{./Chapter14/Figures/figure-beamsize-bleu}
\caption{一个神经机器翻译系统中搜索束大小对BLEU的影响\upcite{DBLP:conf/aclnmt/KoehnK17}}
\label{fig:14-3}
\end{figure}
%----------------------------------------------------------------------
\parinterval 也有研究者针对降低搜索错误提出了一些解决方案。典型的思路是从训练和推断的行为和目标不一致的角度切入。比如,为了解决{\small\sffamily\bfseries{曝光偏置}}\index{曝光偏置}(Exposure Bias)\index{Exposure Bias}问题\upcite{Ranzato2016SequenceLT},可以让系统使用前面步骤的预测结果作为预测下一个词所需要的历史信息,而不是依赖于标准答案\upcite{Bengio2015ScheduledSF,Zhang2019BridgingTG}。另一方面,为了解决训练和推断目标不一致的问题,可以在训练的时候模拟推断的行为,同时让模型训练的目标与评价系统的方法尽可能一致\upcite{DBLP:conf/acl/ShenCHHWSL16}
\parinterval 需要注意的是,前面提到的搜索束变大造成的翻译品质下降的问题还有其它解决方法。比如,可以通过对结果重排序来缓解这个问题\upcite{DBLP:conf/emnlp/Yang0M18},也可以通过设计更好的覆盖度模型来生成长度更加合理的译文\upcite{li-etal-2018-simple}。从这个角度说,上述问题的成因也较为复杂,因此需要同时考虑模型错误和搜索错误。
%----------------------------------------------------------------------------------------
% NEW SECTION
%----------------------------------------------------------------------------------------
\section{轻量模型}\label{sec:14-3}
\parinterval 翻译速度和翻译精度之间的平衡是机器翻译系统研发中的常见问题。即使是以提升翻译品质为目标的任务(如用BLEU进行评价),也不得不考虑翻译速度的影响。比如,在WMT 和CCMT 的一些任务中可能会使用反向翻译构造伪数据,需要大量的机器翻译;无监督机器翻译中也会频繁地使用神经机器翻译系统构造训练数据。如果翻译速度过慢会增大实验的周期。从应用的角度看,在很多场景下翻译速度甚至比品质更重要。比如,在线翻译和一些小设备上的机器翻译系统都需要保证相对低的翻译延时,以满足用户体验的最基本要求。虽然,我们希望能有一套又好又快的翻译系统,但是现实的情况是:往往需要通过牺牲一些翻译品质来换取速度的提升。下面就列举一些常用的神经机器翻译轻量模型和加速方法。这些方法通常是应用在解码端,因为相比编码端,神经机器翻译的解码端是推断过程中最耗时的部分。
%----------------------------------------------------------------------------------------
% NEW SUBSUB-SECTION
%----------------------------------------------------------------------------------------
\subsection{输出层的词汇选择}
\parinterval 神经机器翻译需要对输入和输出的单词进行分布式表示,比如,每一个单词都用一个512 维向量进行表示。但是,由于真实的词表通常很大,因此计算并保存这些单词的向量表示就会消耗较多的计算和存储资源。特别是对于基于Softmax 的输出层,使用大词表往往会占用较多的系统运算时间。虽然可以通过BPE 和限制词汇表规模的方法降低输出层计算的负担\upcite{Gage1994ANA,DBLP:conf/acl/SennrichHB16a},但是为了获得可接受的翻译品质,词汇表也不能过小,因此输出层仍然十分耗时。
\parinterval 对于这个问题,可以通过改变输出层的网络结构进行缓解\upcite{DBLP:conf/acl/JeanCMB15}。一种比较简单的方法是对可能输出的单词进行筛选,简称词汇选择。这里,可以利用类似于统计机器翻译的翻译表,获得每个源语言单词最可能的译文。在翻译过程中,利用注意力机制找到每个目标语位置对应的源语言位置,之后获得这些源语言单词最可能的翻译候选。之后,Softmax 只需要在这个有限的翻译候选单词集合上计算,大大降低了输出层的计算量。尤其是对于CPU 上的系统,这个方法往往会带来明显的速度提升,同时保证翻译品质。图\ref{fig:14-4}给出了词汇选择方法的示意图。
%----------------------------------------------
\begin{figure}[htp]
\centering
\input{./Chapter14/Figures/figure-different-softmax}
\caption{标准Softmax vs 基于词汇选择的Softmax}
\label{fig:14-4}
\end{figure}
%----------------------------------------------
\parinterval 实际上,词汇选择也是一种典型的处理大词表的方法(见\chapterthirteen)。这种方法最大的优点在于,它可以与其它方法结合,比如与BPE等方法结合。本质上,这种方法与传统基于统计的机器翻译中的短语表剪枝有类似之处\upcite{DBLP:conf/emnlp/ZensSX12,DBLP:conf/emnlp/JohnsonMFK07,DBLP:conf/emnlp/LingGTB12},当翻译候选过多的时候,可以根据翻译候选的质量对候选集进行剪枝。这种技术已经在统计机器翻译系统中得到成功应用。
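\parinterval 词汇选择的基本思想可以用下面的Python代码草图来说明(其中的词典、词表和打分均为人为构造的示例数据,仅用于展示“先筛选候选、再在候选集合上做Softmax”的过程):
{\small
\begin{verbatim}
import math

def select_vocab(src_words, lexicon, topk=2, extra=("<eos>",)):
    # lexicon:源语言单词 -> 目标语言翻译候选列表(类似统计机器翻译的翻译表)
    cand = set(extra)
    for w in src_words:
        cand.update(lexicon.get(w, [])[:topk])
    return sorted(cand)

def restricted_softmax(logits, cand, vocab):
    # 只在候选单词集合上计算Softmax,词表中的其余单词不参与计算
    idx = {w: vocab[w] for w in cand}
    m = max(logits[i] for i in idx.values())
    exps = {w: math.exp(logits[i] - m) for w, i in idx.items()}
    z = sum(exps.values())
    return {w: v / z for w, v in exps.items()}

lexicon = {"谢谢": ["thanks", "thank"], "你": ["you", "your"]}
vocab = {"thanks": 0, "thank": 1, "you": 2, "your": 3, "<eos>": 4, "dog": 5}
logits = [2.0, 0.5, 1.0, -0.2, 0.1, 3.0]   # 输出层对整个词表的打分
cand = select_vocab(["谢谢", "你"], lexicon)
print(restricted_softmax(logits, cand, vocab))  # “dog”不在候选中,被跳过
\end{verbatim}
}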
%----------------------------------------------------------------------------------------
% NEW SUBSUB-SECTION
%----------------------------------------------------------------------------------------
\subsection{消除冗余计算}
\parinterval 消除不必要的计算是加速机器翻译的常用技术。比如,在统计机器翻译时代,假设重组就是一种典型的避免冗余计算的手段(\chapterfour)。对于神经机器翻译中的Transformer 模型,一种简单有效的方法是对解码端的注意力结果进行缓存。在生成每个目标语译文时,Transformer 模型会对当前位置之前的所有位置进行自注意力操作,但是这些计算里只有和当前位置相关的计算是“新” 的,前面位置之间的注意力结果已经在之前的解码步骤里计算过,因此可以对其进行缓存。
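\parinterval 解码端注意力结果缓存的思想可以用下面的单头自注意力示意代码来体会(这里用NumPy实现一个极简版本,矩阵为随机初始化,仅用于展示“新位置只计算一次、历史位置直接复用缓存”的过程,并不是Transformer的完整实现):
{\small
\begin{verbatim}
import numpy as np

class CachedSelfAttention:
    def __init__(self, d):
        rng = np.random.default_rng(0)
        self.Wq, self.Wk, self.Wv = (rng.standard_normal((d, d))
                                     for _ in range(3))
        self.keys, self.values = [], []     # 缓存之前所有位置的K和V

    def step(self, x):
        # x:当前位置的输入向量,形状为(d,)
        q = x @ self.Wq
        self.keys.append(x @ self.Wk)       # 只为“新”位置计算K和V
        self.values.append(x @ self.Wv)     # 历史位置的结果直接取缓存
        K, V = np.stack(self.keys), np.stack(self.values)
        att = np.exp(K @ q / np.sqrt(len(q)))
        att = att / att.sum()
        return att @ V                      # 当前位置的注意力输出

attn = CachedSelfAttention(d=8)
for t in range(3):                          # 逐位置解码,无需重算历史位置
    out = attn.step(np.ones(8) * t)
print(out.shape)                            # (8,)
\end{verbatim}
}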
\parinterval 此外,由于Transformer 模型较为复杂,还存在很多冗余。比如,Transformer 的每一层会包含自注意力机制、层正则化、残差连接、前馈神经网络等多种不同的结构。同时,不同结构之间还会包含一些线性变换。多层Transformer(通常为6 层)模型会更加复杂。但是,这些层可能在做相似的事情,甚至有些计算根本就是重复的。图\ref{fig:14-5}中展示了解码端自注意力和编码-解码注意力中不同层的注意力权重的相似性,这里的相似性利用Jensen-Shannon散度进行度量\upcite{61115}。可以看到,自注意力中,2-5层之间的注意力权重的分布非常相似。编码-解码注意力也有类似的现象,临近的层之间有非常相似的注意力权重。这个现象说明:在多层神经网络中有些计算是冗余的,因此很自然的想法是消除这些冗余使得机器翻译变得更“轻”。
%----------------------------------------------
\begin{figure}[htp]
\centering
\input{./Chapter14/Figures/figure-weight-similarity}
\caption{自注意力和编码-解码注意力中不同层之间注意力权重的相似性(深色表示相似)}
\label{fig:14-5}
\end{figure}
%----------------------------------------------
\parinterval 一种方法是将不同层的注意力权重进行共享,这样上层的注意力权重可以复用下层的注意力权重\upcite{Xiao2019SharingAW}。在编码-解码注意力中,由于注意力机制中输入的Value 都是一样的\footnote{在Transformer 解码端,编码解码注意力输入的Value 是编码端的输出,因此是相同的(详见\chaptertwelve 关于Transformer 模型的内容)。},我们甚至可以直接复用前一层注意力计算的结果。图\ref{fig:14-6}给出了不同方法的对比,其中$S$表示注意力权重,$A$表示注意机制的输出。可以看到,使用共享的思想,可以大大减少冗余的计算。
%----------------------------------------------
\begin{figure}[htp]
\centering
\input{./Chapter14/Figures/figure-comparison-of-different-attention-method}
\caption{标准的多层自注意力、共享自注意力、共享编码-解码注意力方法的对比\upcite{Xiao2019SharingAW}}
\label{fig:14-6}
\end{figure}
%----------------------------------------------
\parinterval 另一种方法是对不同层的参数进行共享。这种方法虽然不能带来直接的提速,但是可以大大减小模型的体积。比如,可以重复使用同一层的参数完成多层的计算。极端一些的情况下,六层网络可以只使用一层网络的参数\upcite{DBLP:conf/aaai/DabreF19}。不过,在深层模型中(层数$>$20),浅层部分的差异往往较大,而深层(远离输入)之间的相似度会更高。这时可以考虑对深层的部分进行更多的共享。
\parinterval 减少冗余计算也代表了一种剪枝的思想。本质上,是在利用模型参数的稀疏性假设\upcite{Narang2017BlockSparseRN,Gale2019TheSO}:一部分参数对模型整体的行为影响不大,因此可以直接被抛弃掉。这类方法也被使用在神经机器翻译模型的不同部分。比如,对于Transformer模型,也有研究发现多头注意力中的有些头是有冗余的\upcite{Michel2019AreSH},因此可以直接对其进行剪枝\upcite{DBLP:journals/corr/abs-1905-09418}
%----------------------------------------------------------------------------------------
% NEW SUBSUB-SECTION
%----------------------------------------------------------------------------------------
\subsection{轻量解码端及小模型}
\parinterval 在推断时,神经机器翻译的解码端是最耗时的,因为每个目标语位置需要单独输出单词的分布,同时在搜索过程中每一个翻译假设都要被扩展成多个翻译假设,进一步增加了计算量。因此,另一种加速系统的思路是使用更加轻量的解码器\upcite{DBLP:journals/corr/HintonVD15,Munim2019SequencelevelKD}
\parinterval 比较简单的做法是把解码端的网络变得更“浅”、更“窄”。所谓浅网络是指使用更少的层构建神经网络,比如,使用3 层,甚至1 层网络的Transformer 解码器。所谓窄网络是指将网络中某些层中神经元的数量减少。不过,直接训练这样的小模型会带来翻译品质的下降。这时会考虑使用知识精炼等技术来提升小模型的品质。
\parinterval 另一种思路是化简Transformer 的解码端神经网络。比如,可以使用平均注意力机制代替原始的Transformer 自注意力机制\upcite{DBLP:journals/corr/abs-1805-00631},也可以使用运算更轻的卷积操作代替注意力模块\upcite{Wu2019PayLA}。前面提到的基于共享注意力机制的模型也是一种典型的轻量模型\upcite{Xiao2019SharingAW}
\parinterval 此外,使用异构神经网络也是一种平衡精度和速度的有效方法。在很多研究中发现,基于Transformer 的编码器对翻译品质的影响更大,而解码端的作用会小一些。因此,一种想法是用更快速的解码端结构,比如,用基于循环神经网络的解码端替换基于Transformer 的解码端\upcite{Chen2018TheBO}。这样,既能发挥Transformer 在编码上的优势,同时也能利用循环神经网络在解码端速度上的优势。使用类似的思想,也可以用卷积网络等结构进行解码端网络的设计。此外,也有研究者对注意力机制进行优化,以达到加速Transformer模型的目的\upcite{DBLP:journals/corr/abs-1805-00631,Kitaev2020ReformerTE,Katharopoulos2020TransformersAR,DBLP:journals/corr/abs-2006-04768}
\parinterval 针对轻量级Transformer模型的设计也包括层级的结构剪枝,这类方法试图通过跳过某些操作或者某些层来降低计算量。典型的相关工作是样本自适应网络结构,如 FastBERT\upcite{Liu2020FastBERTAS}、Depth Adaptive Transformer\upcite{Elbayad2020DepthAdaptiveT} 和LayerDrop\upcite{DBLP:conf/iclr/FanGJ20}等,与传统的Transformer的解码过程不同,这类网络结构在推断时不需要计算全部解码层,而是根据输入自动选择模型的部分层进行计算,达到加速和减少参数量的目的。此外,矩阵分解也是一种轻量级模型解决方案,这类方法通过矩阵分解的方法提升计算效率,通过简化复杂的矩阵计算来达到加速模型训练和推断的目的。例如 Adaptive Input Representations\upcite{DBLP:conf/iclr/BaevskiA19}提出词频自适应表示,词频越高则对应的词向量维度越大,反之越小,显著减少了词向量矩阵大小。此外还有一些工作尝试消除注意力机制中的冗余计算,对层与层之间的参数进行共享\upcite{Xiao2019SharingAW}或者是对跨层参数进行共享\upcite{Lan2020ALBERTAL},以达到加速Transformer模型的目的\upcite{DBLP:journals/corr/abs-1805-00631,Kitaev2020ReformerTE,Katharopoulos2020TransformersAR,DBLP:journals/corr/abs-2006-04768}
%----------------------------------------------------------------------------------------
% NEW SUBSUB-SECTION
%----------------------------------------------------------------------------------------
\subsection{批量推断}
\parinterval 深度学习时代下,使用GPU(图形处理单元)已经成为绝大多数神经网络模型研究的基本要求。特别是对于机器翻译这样的复杂任务,GPU 的并行运算能力会带来明显的速度提升。为了充分利用GPU 的并行能力,可以同时对多个句子进行翻译,即{\small\sffamily\bfseries{批量推断}}\index{批量推断}(Batch Inference)\index{Batch Inference}
\parinterval\chaptersix 已经介绍了神经机器翻译中{\small\sffamily\bfseries{批量处理}}\index{批量处理}(Batching)\index{Batching}的基本概念。其实现并不困难,不过有两方面问题需要注意:
\begin{itemize}
\vspace{0.5em}
\item 批次生成策略。对于源语言文本预先给定的情况,通常是按句子长度组织每个批次,即:把长度相似的句子放到一个批次里。这样做的好处是可以尽可能保证一个批次中的内容是“满” 的,否则如果句长差异过大会造成批次中有很多位置用占位符填充,产生无用计算。对于实时翻译的情况,批次的组织较为复杂。由于有翻译延时的限制,可能无法等到有足够多的句子就要进行翻译。常见的做法是,设置一个等待的时间,在同一个时间段中的句子可以放到一个批次中(或者几个批次中)。对于高并发的情况,也可以考虑使用不同的Bucket保存不同长度范围的句子,之后将同一个Bucket 中的句子进行批量推断。本节末尾给出了一个按长度组织批次的示意代码。
\vspace{0.5em}
\item 批次大小的选择。一个批次中的句子数量越多,GPU 设备的利用率越高,系统吞吐越大。但是,一个批次中所有句子翻译结束后才能拿到翻译结果,因此批次中有些句子即使已经翻译结束也要等待其它没有完成的句子。也就是说,从单个句子来看,批次越大翻译的延时越长,这也导致在翻译实时性要求较高的场景中,不能使用过大的批次。而且,大批次对GPU 显存的消耗更大,因此也需要根据具体任务合理选择批次大小。为了说明这些问题,图\ref{fig:14-7}展示了不同批次大小下的吞吐、延时和显存消耗。
\vspace{0.5em}
\end{itemize}
%----------------------------------------------
\begin{figure}[htp]
\centering
\input{./Chapter14/Figures/figure-batch-time-mem}
\caption{Transformer系统在不同批次大小下的吞吐、延时和显存消耗}
\label{fig:14-7}
\end{figure}
%----------------------------------------------
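\parinterval 上面提到的按句子长度组织批次的策略可以用下面的示意代码描述(其中的数据与批次大小均为便于说明而构造的示例):
{\small
\begin{verbatim}
def make_batches(sentences, batch_size):
    # 按长度排序后再切分批次,使同一批次内句长尽可能相近,
    # 从而减少占位符填充带来的无用计算
    order = sorted(range(len(sentences)), key=lambda i: len(sentences[i]))
    batches = []
    for s in range(0, len(order), batch_size):
        idx = order[s:s + batch_size]
        max_len = max(len(sentences[i]) for i in idx)
        batch = [sentences[i] + ["<pad>"] * (max_len - len(sentences[i]))
                 for i in idx]              # 批次内补齐到相同长度
        batches.append((idx, batch))
    return batches

data = [["我", "很", "好"], ["你", "好"],
        ["这", "是", "一", "个", "例", "子"], ["谢谢"]]
for idx, batch in make_batches(data, batch_size=2):
    print(idx, batch)
\end{verbatim}
}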
%----------------------------------------------------------------------------------------
% NEW SUBSUB-SECTION
%----------------------------------------------------------------------------------------
\subsection{低精度运算}
\parinterval 降低运算强度也是计算密集型任务的加速手段之一。标准的神经机器翻译系统大多基于单精度浮点运算。从计算机的硬件发展看,单精度浮点运算还是很“重” 的。当计算能容忍一些精度损失的时候,可以考虑降低运算精度来达到加速的目的。比如:
\begin{itemize}
\vspace{0.5em}
\item 半精度运算。半精度运算是随着近几年GPU 技术发展而逐渐流行的一种运算方式。简单来说,半精度的表示要比单精度需要更少的存储单元,所表示的浮点数范围也相应的变小。不过,实践中已经证明神经机器翻译中的许多运算用半精度计算就可以满足对精度的要求。因此,直接使用半精度运算可以大大加速系统的训练和推断进程,同时对翻译品质的影响很小。不过,需要注意的是,在分布式训练的时候,由于参数服务器需要对多个计算节点上的梯度进行累加,因此保存参数的部分仍然会使用单精度浮点以保证多次累加之后不会造成精度的损失。
\vspace{0.5em}
\item 整型运算。整数运算是一种比浮点运算“轻” 很多的运算。无论是芯片占用面积、能耗还是处理单次运算的时钟周期数,整数运算相比浮点运算都有着明显的优势。因此,使用整数运算也是很有潜力的加速手段。不过,整数的表示和浮点数有着很大的不同。一个基本的问题是,整数是不连续的,因此无法准确地刻画浮点数中很小的小数。对于这个问题,一种解决方法是利用“量化+ 反量化+ 缩放” 的策略让整数运算近似浮点运算的效果\upcite{DBLP:journals/corr/abs-1906-00532,DBLP:conf/cvpr/JacobKCZTHAK18,DBLP:journals/corr/abs-1910-10485}。所谓“量化” 就是把一个浮点数离散化为一个整数,“反量化” 是这个过程的逆过程。由于浮点数可能超出整数的范围,因此会引入一个缩放因子。在量化前将浮点数缩放到整数可以表示的范围,反量化前再缩放回原始浮点数的表示范围。这种方法在理论上可以带来很好的加速效果。不过由于量化和反量化的操作本身也有时间消耗,而且在不同处理器上的表现差异较大,因此不同的实现方式带来的加速效果并不相同,需要通过实验测算。本节末尾给出了一个量化和反量化过程的示意代码。
\vspace{0.5em}
\item 低精度整型运算。使用更低精度的整型运算是进一步加速的手段之一。比如使用16 位整数、8 位整数,甚至4 位整数在理论上都会带来速度的提升,如表\ref{tab:14-3}所示。不过,并不是所有处理器都支持低精度整数的运算。开发这样的系统,一般需要硬件和特殊低精度整数计算库的支持。而且相关计算大多是在CPU 上实现,应用会受到一定的限制。
\vspace{0.5em}
\end{itemize}
%----------------------------------------------
\begin{table}[htp]
\centering
\caption{不同计算精度的芯片的运算速度对比\protect\footnotemark}
\begin{tabular}{ l | l l l l l}
\rule{0pt}{13pt} 指标 & FP32 &INT32 &INT16 &INT8 &INT4 \\ \hline
\rule{0pt}{13pt} 速度 & 1$\times$ & 3$\sim$4$\times$ & $\approx$4$\times$ & 4$\sim$6$\times$ & $\approx$8$\times$
\end{tabular}
\label{tab:14-3}
\end{table}
\footnotetext{表中比较了几种通用数据类型的乘法运算速度,不同硬件和架构上不同类型的数据的计算速度略有不同。总体来看整型数据类型和浮点型数据相比具有显著的计算速度优势,INT4相比于FP32数据类型的计算最高能达到8倍的速度提升。}
%--------------------------------------
\parinterval 实际上,低精度表示的另一个好处是可以减少模型存储的体积。比如,如果要把机器翻译模型作为软件的一部分打包存储,这时可以考虑用低精度的方式保存模型参数,使用时再恢复成原始精度的参数。值得注意的是,参数的离散化表示(比如整型表示)的一个极端例子是{\small\sffamily\bfseries{二值网络}}\index{二值网络}(Binarized Neural Networks)\index{Binarized Neural Networks}\upcite{DBLP:conf/nips/HubaraCSEB16},即只用−1和+1 表示网络的每个参数。二值化可以被看作是一种极端的量化手段。不过,这类方法还没有在机器翻译中得到大规模验证。
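\parinterval 上文介绍的“量化+反量化+缩放”策略可以用下面的示意代码说明(这里实现的是一种最简单的对称量化,缩放因子取张量绝对值的最大值,数据为构造的示例):
{\small
\begin{verbatim}
import numpy as np

def quantize(x, num_bits=8):
    # 对称量化:用一个缩放因子把浮点数映射到[-127, 127]的整数区间
    qmax = 2 ** (num_bits - 1) - 1
    scale = np.abs(x).max() / qmax
    q = np.clip(np.round(x / scale), -qmax, qmax).astype(np.int8)
    return q, scale

def dequantize(q, scale):
    # 反量化:乘回缩放因子,近似恢复原始的浮点数
    return q.astype(np.float32) * scale

w = np.array([0.31, -1.24, 0.07, 0.88], dtype=np.float32)
q, scale = quantize(w)
print(q)                     # 量化后的8位整数,例如[32 -127 7 90]
print(dequantize(q, scale))  # 与w近似,但存在量化误差
\end{verbatim}
}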
%----------------------------------------------------------------------------------------
% NEW SECTION
%----------------------------------------------------------------------------------------
\section{非自回归翻译}
\parinterval 目前大多数神经机器翻译模型都使用了编码器-解码器框架来实现,编码器将源语句的表示送入到解码器来生成目标句子;解码器通常从左到右逐字地生成目标句子,也就是,第$j$个目标词的生成依赖于先前生成的$j-1$个目标词。这种翻译方式也被称作{\small\sffamily\bfseries{自回归解码}}\index{自回归解码}(Auto-regressive Decoding)\index{Auto-regressive Decoding}\upcite{bahdanau2014neural,DBLP:journals/corr/GehringAGYD17,vaswani2017attention}。虽然最近提出的基于卷积或者自注意力的模型使得训练过程高度并行化,加快了训练速度,但由于推断过程自回归的特性,模型无法同时生成目标语的所有单词,这导致模型的推断过程非常缓慢,这对于神经机器翻译的实际应用是个很大的挑战。因此,如何设计一个在训练和推断阶段都能够并行化的模型是目前研究的热点之一。
%----------------------------------------------------------------------------------------
% NEW SUBSUB-SECTION
%----------------------------------------------------------------------------------------
\subsection{自回归 vs 非自回归}
\parinterval 目前主流的神经机器翻译的推断是一种{\small\sffamily\bfseries{自回归翻译}}\index{自回归翻译}(Autoregressive Translation)\index{Autoregressive Translation}过程。所谓自回归是一种描述时间序列生成的方式。对于目标序列$\seq{y}=\{y_1,\dots,y_n\}$,自回归模型假设$j$时刻状态$y_j$的生成依赖于之前的状态$\{y_1,\dots,y_{j-1}\}$,而且$y_j$$\{y_1,\dots,y_{j-1}\}$构成线性关系,那么生成$y_j$就是自回归的序列生成过程。神经机器翻译借用了这个概念,但是并不要求线性模型。对于输入的源语言序列$\seq{x}=\{x_1,\dots,x_m\}$,用自回归翻译模型生成译文序列$\seq{y}=\{y_1,\dots,y_n\}$的概率可以被定义为:
\begin{eqnarray}
\funp{P}(\seq{y}|\seq{x})=\prod_{j=1}^n {\funp{P}(y_j|y_{<j},\seq{x})}
\label{eq:14-8}
\end{eqnarray}
\noindent 即译文单词$y_{j}$的生成依赖前面已经生成的单词序列$\{y_1,\dots,y_{j-1}\}$和源语言序列$\{x_1,\dots,x_m\}$。这种自回归的翻译方式符合人们阅读和生成句子时的习惯。它在机器翻译等任务上也取得了较好的性能,特别是配合束搜索也能够有效的寻找近似最优译文。但是,由于解码器的每个步骤必须顺序地而不是并行地运行,自回归翻译模型会阻碍不同译文单词生成的并行化。特别是在GPU 上,翻译的自回归性会大大降低计算的并行度,导致推断过程的效率比较低下,设备利用率低。
\parinterval 对于这个问题,研究者也考虑移除翻译的自回归性,进行{\small\sffamily\bfseries{非自回归翻译}}\index{非自回归翻译}(Non-Autoregressive Translation,NAT)\index{Non-Autoregressive Translation}\upcite{Gu2017NonAutoregressiveNM}。一个简单的非自回归翻译模型将问题建模为:
\begin{eqnarray}
\funp{P}(\seq{y}|\seq{x})=\prod_{j=1}^n {\funp{P}(y_j|\seq{x})}
\label{eq:14-9}
\end{eqnarray}
\noindent 其中,位置$j$上的输出$y_j$只依赖于输入句子$\seq{x}$,与其它位置上的输出无关。于是,所有位置上${y_j}$都可以并行生成,大大提高了GPU 等并行运算设备的利用率。这种方式一般可以带来几倍甚至十几倍的速度提升。
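\parinterval 自回归和非自回归两种生成方式的差异可以用下面的示意代码来体会(这里用一个“查表”式的假模型代替真实的神经网络打分,函数与数据均为说明用的假设):
{\small
\begin{verbatim}
def autoregressive_decode(predict, src, max_len):
    # 自回归:第j个词依赖之前已生成的词,必须逐个顺序生成
    trg = []
    for _ in range(max_len):
        word = predict(src, trg)            # 对应 P(y_j | y_<j, x)
        trg.append(word)
        if word == "<eos>":
            break
    return trg

def non_autoregressive_decode(predict, src, length):
    # 非自回归:每个位置只依赖源语言句子,可以并行生成所有位置
    return [predict(src, position=j) for j in range(length)]

table = {0: "thanks", 1: "a", 2: "lot", 3: "<eos>"}
print(autoregressive_decode(lambda src, trg: table[len(trg)],
                            ["谢谢", "你"], max_len=10))
print(non_autoregressive_decode(lambda src, position: table[position],
                                ["谢谢", "你"], length=4))
\end{verbatim}
}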
%----------------------------------------------------------------------------------------
% NEW SUB-SECTION
%----------------------------------------------------------------------------------------
\subsection{非自回归翻译模型的结构}
\parinterval 在介绍非自回归模型的具体结构之前,先来想想如何实现一个简单的非自回归翻译模型。这里用标准的Transformer来举例。首先为了一次性能够生成所有的词,需要丢弃解码端用于屏蔽未来信息的矩阵,从而去掉模型的自回归性。此外,还要考虑要生成的目标句子有多长。自回归模型每步的输入是上一步解码出的结果,当预测到终止符<eos>时序列的生成就自动停止了,然而非自回归模型却没有这样的特性,因此还需要一个长度预测器来预测出其长度,之后再用这个长度得到每个位置的表示,进而完成整个序列的生成。
\parinterval\ref{fig:14-12}就是一个最简单的非自回归翻译模型,在推断过程就可以一次性解码出整个目标序列。但是这样得到的模型所翻译出的句子质量很低。比如,在IWSLT英德等数据上的BLEU值只有个位数,而现在最好的自回归模型已经能够达到30左右的BLEU值。这是因为每个位置词的分布$\funp{P}(y_j)$只依赖于源语言句子$\seq{x}$,使得模型对真实目标分布的近似性很差,缺乏了关键的序列依赖信息。
%----------------------------------------------------------------------
\begin{figure}[htp]
\centering
\input{./Chapter14/Figures/figure-non-autoregressive}
\caption{一个简单的非自回归模型}
\label{fig:14-12}
\end{figure}
%----------------------------------------------------------------------
\parinterval 完全独立地对每个词建模,会出现什么问题呢?来看一个例子,将中文“谢谢你”翻译成英文,可以翻译成“thanks to you”或者“thanks a lot”。假设生成这两种翻译的概率是相等的,即一半的概率是“thanks to you”,另一半的概率是“thanks a lot”。由于非自回归模型的条件独立性假设,解码时第二个词“to”和“a”的概率是差不多大的,第三个词“you”和“lot”的概率也是差不多大的,会使得模型生成出“thanks to lot”或者“thanks a you”这样错误的翻译,如图\ref{fig:14-13}所示。这便是影响句子质量的关键问题,称之为{\small\sffamily\bfseries{多峰问题}}\index{多峰问题}(Multi-modality Problem)\index{Multi-modality Problem}\upcite{Gu2017NonAutoregressiveNM}。针对非自回归模型难以处理多峰问题进行改进是提升非自回归模型质量的关键。
%----------------------------------------------------------------------
\begin{figure}[htp]
\centering
\input{./Chapter14/Figures/figure-multi-modality}
\caption{非自回归模型中的多峰问题}
\label{fig:14-13}
\end{figure}
%----------------------------------------------------------------------
\parinterval 因此,非自回归翻译方面的研究大多集中在针对以上问题的求解\upcite{Wei2019ImitationLF,Shao2019RetrievingSI,Akoury2019SyntacticallyST,Guo2020FineTuningBC,Ran2020LearningTR}。有三个角度:使用繁衍率来预测长度、使用句子级知识蒸馏来降低学习难度、使用自回归模型打分来挑选好的翻译。下面将依次对这些方法进行介绍。
%----------------------------------------------------------------------------------------
% NEW SUBSUB-SECTION
%----------------------------------------------------------------------------------------
\subsubsection{1. 基于繁衍率的非自回归模型}
\parinterval\ref{fig:14-14}给出了基于繁衍率的非自回归模型的结构\upcite{Gu2017NonAutoregressiveNM},由以下四个模块组成:编码器,解码器,繁衍率预测器和解码端的位置注意力模块。
%----------------------------------------------------------------------
\begin{figure}[htp]
\centering
\input{./Chapter14/Figures/figure-reproduction-rate}
\caption{基于繁衍率的非自回归模型}
\label{fig:14-14}
\end{figure}
%----------------------------------------------------------------------
\parinterval 与自回归翻译模型类似,Transformer模型的编码器和解码器都完全由前馈神经网络和多头注意力模块组成。在非自回归模型的解码开始之前,需要知道目标句子的长度,以便并行生成所有单词。更重要的是,非自回归模型需要一次性生成出所有的目标词,因此不能像自回归模型那样用已生成的词作为第一个解码器层的输入。那么非自回归模型解码器的输入是什么呢?如果完全省略第一个解码器层的输入,或者仅使用位置嵌入,将会导致非常差的性能。这里使用繁衍率来解决这个问题,繁衍率指的是对于每个源语言单词预测所对应的目标语单词的个数(见\chapterthree)。翻译过程取决于繁衍率序列,最终目标句子长度则由所有源语言单词对应的繁衍率之和决定。这个繁衍率序列可以通过外部词对齐工具来得到,从而来训练这个繁衍率预测器。但由于外部词对齐系统会出现错误,因此在模型收敛之后,需要在繁衍率预测器上加一个强化学习的损失来进行微调。
\parinterval 另外,在每个解码器层中还包括额外的位置注意力模块,该模块与Transformer模型的其它部分中使用的多头注意力机制相同,如下:
\begin{eqnarray}
\textrm{Attention}(Q,K,V)&=&\textrm{Softmax}(\frac{QK^{T}}{\sqrt{d_k}})\cdot V
\label{eq:14-10}
\end{eqnarray}
\noindent 其中$d_k$表示模型的隐层大小,这里位置编码作为$Q$和$K$,解码端上一层的输出作为$V$。将位置信息直接结合到注意力过程中,比单独的位置嵌入提供了更强的位置信息,同时该附加信息可能还会提高解码器执行局部重排序的能力。
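\parinterval 图\ref{fig:14-14}中“Copy”操作的含义可以用下面的示意代码说明:解码器的输入由源语言单词(实际系统中是其词嵌入)按照各自的繁衍率复制得到,目标句长度等于繁衍率之和(其中的数据均为便于说明而构造的示例):
{\small
\begin{verbatim}
def copy_by_fertility(src_tokens, fertilities):
    # 繁衍率:每个源语言单词对应的目标语言单词个数
    assert len(src_tokens) == len(fertilities)
    dec_input = []
    for tok, f in zip(src_tokens, fertilities):
        dec_input.extend([tok] * f)        # 按繁衍率复制(实际复制的是词嵌入)
    return dec_input

src = ["我", "的", "狗"]
fert = [1, 0, 2]                      # “的”不产生译文单词,“狗”对应两个
print(copy_by_fertility(src, fert))   # ['我', '狗', '狗']
print(sum(fert))                      # 预测出的目标句长度:3
\end{verbatim}
}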
%----------------------------------------------------------------------------------------
% NEW SUBSUB-SECTION
%----------------------------------------------------------------------------------------
\subsubsection{2. 句子级知识蒸馏}
\parinterval 知识蒸馏的基本思路是让学生模型所表示的函数尽可能去拟合教师模型所表示的函数(\chapterthirteen 中将会有更加详细的介绍)。如果想要训练一个小模型,同时又希望它的性能与大模型一样好,这时可以把大模型看作传授知识的“教师”,把小模型看作接受知识的“学生”。在训练时,先将输入送给教师模型,让它预测出概率分布,作为小模型的监督信息来计算损失函数,进而完成小模型的训练。
\parinterval 类似的,可以让自回归模型作为“教师”,非自回归模型作为“学生”。把自回归神经机器翻译生成的句子作为新的训练样本,送给非自回归机器翻译进行学习\upcite{Gu2017NonAutoregressiveNM,Lee2018DeterministicNN,Zhou2020UnderstandingKD,Guo2020FineTuningBC}。这种方式能够一定程度上解决多峰问题。
\parinterval 这是因为,经过训练的自回归模型会始终将中文“谢谢你”翻译成相同的英文结果,如生成“thanks to you”而不会出现“thanks a lot”的结果。这样的操作得到的数据集噪声更少,能够降低非自回归模型学习的难度。
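\parinterval 句子级知识蒸馏的数据构造过程可以用下面的示意代码描述(其中teacher\_translate代表一个已训练好的自回归“教师”模型的翻译接口,这里用一个固定输出的假“教师”代替,仅作演示):
{\small
\begin{verbatim}
def build_distilled_corpus(src_sentences, teacher_translate):
    # 用自回归“教师”模型的译文替换原始参考译文,
    # 得到更“确定”的训练数据,供非自回归“学生”模型学习
    return [(src, teacher_translate(src)) for src in src_sentences]

fake_teacher = lambda src: "thanks to you" if src == "谢谢 你" else "..."
print(build_distilled_corpus(["谢谢 你"], fake_teacher))
\end{verbatim}
}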
%----------------------------------------------------------------------------------------
% NEW SUBSUB-SECTION
%----------------------------------------------------------------------------------------
\subsubsection{3. 自回归模型打分}
\parinterval 通过采样不同的繁衍率序列,可以得到多个不同的翻译候选。之后,把这些不同的译文再交给自回归模型来评分,选择一个最好的结果作为最终的翻译。在这里,可以使用强制解码同时对多个译文进行打分,因此这个过程可以充分并行。通常,这种方法能够很有效的提升非自回归翻译模型的译文质量,并且同时保持较高的推断速度\upcite{Gu2017NonAutoregressiveNM,Wei2019ImitationLF,Guo2019NonAutoregressiveNM,Wang2019NonAutoregressiveMT,Ma2019FlowSeqNC}
%----------------------------------------------------------------------------------------
% NEW SUBSECTION
%----------------------------------------------------------------------------------------
\subsection{更好的训练目标}
\parinterval 虽然非自回归翻译可以显著提升翻译速度,但是很多情况下其翻译质量还是低于传统的自回归翻译\upcite{Gu2017NonAutoregressiveNM,Kaiser2018FastDI,Guo2020FineTuningBC}。因此,很多工作致力于缩小自回归模型和非自回归模型的性能差距\upcite{Ran2020LearningTR,Tu2020ENGINEEI,Shu2020LatentVariableNN}。这类方法通常通过修改训练目标来达到提升非自回归翻译品质的目的,例如:
\begin{itemize}
\vspace{0.5em}
\item 基于层级知识蒸馏的方法\upcite{Li2019HintBasedTF}。由于自回归模型和非自回归模型的结构相差不大,因此可以将翻译质量更高的自回归模型作为“教师”,通过给非自回归模型提供监督信号,使其逐块地学习前者的分布。研究者发现了两点非常有意思的现象:1)非自回归模型输出的重复单词的位置的隐藏状态非常相似。2)非自回归模型的注意力分布比自回归模型的分布更加分散。这两点发现启发了研究者使用自回归模型中的隐层状态来指导非自回归模型学习。通过计算两个模型隐层状态的距离以及注意力矩阵的KL散度作为额外的损失来帮助非自回归模型的训练过程。
\vspace{0.5em}
\item 基于模仿学习的方法\upcite{Wei2019ImitationLF}。这种观点认为非自回归模型可以从性能优越的自回归模型中学得知识。模仿学习是强化学习中的一个概念,即从专家那里学习正确的行为,与监督学习很相似\upcite{Ho2016ModelFreeIL,Ho2016GenerativeAI,Duan2017OneShotIL}。与其不同的是,模仿学习不是照搬专家的行为,而是学习专家为什么要那样做。换句话说,学习的不是专家的镜像,而是一个专家的行为分布。这里,可以将自回归模型作为专家,非自回归模型学习不同时间步和不同层中的解码状态,最后将模仿学习的损失与交叉熵损失加权求和后作为最终的优化目标。
\vspace{0.5em}
\item 添加额外的正则化项来显式的约束非自回归模型\upcite{Wang2019NonAutoregressiveMT}。非自回归模型的翻译结果中存在着两种非常严重的错误:重复翻译和不完整的翻译。第一种问题是因为解码器隐层状态中相邻的两个位置过于相似,因此翻译出来的单词也一样。对于第二个问题,通常将其归咎于非自回归模型在翻译的过程中丢失了一些源语句信息,从而造成了翻译效果的下降。针对这两个问题,可以通过在相邻隐层状态间添加相似度约束以及引入对偶学习的思想来计算一个重构损失。具体来说,对于目前正在进行的翻译$\seq{x}\to\seq{y}$,通过利用一个反向的自回归模型再将$\seq{y}$翻译成$\seq{x'}$,最后计算$\seq{x}$$\seq{x'}$的差异性作为损失。
\vspace{0.5em}
\end{itemize}
%----------------------------------------------------------------------------------------
% NEW SUBSECTION
%----------------------------------------------------------------------------------------
\subsection{引入自回归模块}
\parinterval 非自回归翻译消除了序列生成中不同位置间的依赖,在每个位置都进行独立的预测,但这反过来会导致翻译质量显著下降,因为缺乏不同单词间依赖关系的建模。因此,也有研究聚焦于在非自回归模型中添加一些自回归组件来提升网络结构的表达能力。
\parinterval 一种做法是将语法作为生成的目标句的框架\upcite{Akoury2019SyntacticallyST}。具体来说,先自回归地预测出一个目标句的句法块序列,将句法树作为序列信息的抽象,然后根据句法块序列非自回归地生成所有目标词。如图\ref{fig:14-21}所示,该模型由一个编码器和两个解码器组成。其中编码器和第一个解码器与标准的Transformer模型相同,用来自回归地预测句法树信息;第二个解码器将第一个解码器的句法信息作为输入,之后再非自回归地生成整个目标句子。在训练过程中,通过使用外部解析器获得对句法预测任务的监督。虽然可以简单地让模型预测整个句法树,但是这种方法会显著增加自回归步骤的数量,从而增大时间开销。因此,为了维持句法信息与解码时间的平衡,这里预测一些由句法类型和子树大小组成的块标识符(如VP3)而不是整个句法树。
%----------------------------------------------
\begin{figure}[htp]
\centering
\input{./Chapter14/Figures/figure-syntax}
\caption{基于句法结构的非自回归模型}
\label{fig:14-21}
\end{figure}
%----------------------------------------------
\parinterval 另一种做法是半自回归地生成目标句子\upcite{Wang2018SemiAutoregressiveNM}。如图\ref{fig:14-20}所示,自回归模型从左到右依次生成目标句子,具有“最强”的自回归性;而非自回归模型完全独立的生成每个目标词,具有“最弱”的自回归性;半自回归模型则是将整个目标句子分成$k$个块,在组内执行非自回归解码,在组间则执行自回归的解码,能够在每个时间步并行产生多个连续的单词。通过调整块的大小,半自回归模型可以灵活的调整到自回归模型(当$k$等于1)和非自回归模型(当$k$大于最大的目标语长度)上来。
%----------------------------------------------
\begin{figure}[htp]
\centering
\input{./Chapter14/Figures/figure-3vs}
\caption{自回归、半自回归和非自回归模型}
\label{fig:14-20}
\end{figure}
%----------------------------------------------
\parinterval 还有一种做法引入了轻量级的自回归重排序模块\upcite{Ran2019GuidingNN}。为了解决非自回归模型解码搜索空间过大的问题,可以使用重排序技术在相对较少的翻译候选上进行自回归模型的计算。如图\ref{fig:14-22}所示,该方法先对源语言句子进行重新排列,将其转换成由源语言单词组成、但符合目标语言语序的伪译文,然后将伪译文进一步转换成目标语言以获得最终的翻译。其中这个重排序模块是一个自回归模型(一层的循环神经网络)。
%----------------------------------------------------------------------
\begin{figure}[htp]
\centering
\input{}
\caption{引入重排序模块的非自回归模型}
\label{fig:14-22}
\end{figure}
%----------------------------------------------------------------------
%----------------------------------------------------------------------------------------
% NEW SUBSECTION
%----------------------------------------------------------------------------------------
\subsection{基于迭代精化的非自回归翻译模型}
\parinterval 如果一次并行生成整个序列,往往会导致单词之间的关系很难捕捉,因此也限制了这类方法的能力。即使生成了错误的译文单词,这类方法也无法修改。针对这些问题,也可以使用迭代式的生成方式\upcite{Lee2018DeterministicNN,Ghazvininejad2019MaskPredictPD,Kasai2020NonAutoregressiveMT}。这种方法放弃了一次性生成最终的目标句子,而是将解码出的文本重新送给解码器,在每次迭代中改进之前生成的单词,可以理解为句子级上的自回归模型。这样做的好处在于,每次迭代的过程中可以利用已经生成的部分翻译结果,来指导其它部分的生成。
\parinterval\ref{fig:14-18}展示了这种方法的简单示例。它拥有一个解码器和$N$个编码器。解码器首先预测出目标句子的长度,然后将输入$\seq{x}$按照长度复制出$\seq{x'}$作为第一个解码器的输入,之后生成$\seq{y'}$出作为第一轮迭代的输出。接下来再把$\seq{y'}$输入给解码器2输出$\seq{y''}$,以此类推。那么迭代到什么时候结束呢?一种简单的做法是提前制定好迭代次数,这种方法能够自主地对生成句子的质量和效率进行平衡。另一种称之为“自适应”的方法,具体是通过计算当前生成的句子上一次生成的变化量来自动停止,例如,使用{\red 杰卡德相似系数}作为变化量函数。
%----------------------------------------------
\begin{figure}[htp]
\centering
\input{./Chapter14/Figures/figure-iteration}
\caption{基于迭代精化的非自回归翻译模型}
\label{fig:14-18}
\end{figure}
%----------------------------------------------
\parinterval 除了使用上一个步骤的输出,当前解码器的输入还使用了添加噪声的正确目标语句子,这两种输入之间的选择由一个超参数控制\upcite{Lee2018DeterministicNN}。另外,对于目标语长度的预测,该方法使用编码端的输出单独训练了一个独立的长度预测模块,这种做法也推广到了目前大多数模型上。
\parinterval 另一种方法借鉴了BERT的思想\upcite{devlin2019bert},提出了一种新的解码方法:Mask-Predict\upcite{Ghazvininejad2019MaskPredictPD}
\parinterval 类似于BERT的[CLS],该方法在源语句子的最前面加上了一个特殊符号[LENGTH]作为输入,用来预测目标句的长度$n$。之后,将特殊符[MASK](与BERT中的[MASK]有相似的含义)复制$n$次作为解码器的输入,然后用非自回归的方式生成目标端所有的词。这样生成的翻译可能是比较差的,因此可以将第一次生成出的这些词中不确定(即生成概率比较低)的一些词再“擦”掉,依据目标端剩余的单词以及源语言句子重新进行预测,不断迭代,直到满足停止条件为止。图\ref{fig:14-19}给出了一个示例。
%----------------------------------------------
\begin{figure}[htp]
\centering
\input{./Chapter14/Figures/figure-mask-predict}
\caption{Mask-Predict的模型结构}
\label{fig:14-19}
\end{figure}
%----------------------------------------------
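\parinterval 作为一个示意,下面的代码给出了Mask-Predict解码循环的一种简化写法(其中predict\_length和predict\_all代表模型的长度预测和并行单词预测接口,这里用固定输出的假模型演示,重掩码数量的衰减方式也只是一种示例):
{\small
\begin{verbatim}
def mask_predict(src, predict_length, predict_all, iterations=3):
    n = predict_length(src)                 # 用[LENGTH]位置预测目标句长度
    trg = ["[MASK]"] * n                    # 初始输入全部为[MASK]
    for t in range(iterations):
        trg, probs = predict_all(src, trg)  # 并行预测所有位置的单词及概率
        if t == iterations - 1:
            break
        # 逐轮减少重新掩码的单词数量
        k = max(1, int(n * (iterations - 1 - t) / iterations))
        for j in sorted(range(n), key=lambda j: probs[j])[:k]:
            trg[j] = "[MASK]"               # “擦”掉概率最低的单词
    return trg

fake_len = lambda src: 3
fake_model = lambda src, trg: (["thanks", "a", "lot"], [0.9, 0.4, 0.8])
print(mask_predict("谢谢 你", fake_len, fake_model))
\end{verbatim}
}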
%----------------------------------------------------------------------------------------
% NEW SECTION
%----------------------------------------------------------------------------------------
\section{多模型集成}\label{sec:14-5}
\parinterval 在机器学习领域,把多个模型融合成一个模型是提升系统性能的一种有效方法。比如,在经典的AdaBoost 方法中\upcite{DBLP:journals/jcss/FreundS97},用多个“弱” 分类器构建的“强” 分类器可以使模型在训练集上的分类错误率无限接近0。类似的思想也被应用到机器翻译中\upcite{DBLP:conf/acl/XiaoZZW10,DBLP:conf/icassp/SimBGSW07,DBLP:conf/acl/RostiMS07,DBLP:conf/wmt/RostiZMS08},被称为{\small\sffamily\bfseries{系统融合}}\index{系统融合}(System Combination)\index{System Combination}。在各种机器翻译比赛中,系统融合已经成为经常使用的技术之一。
\parinterval 广义上来讲,使用多个特征组合的方式都可以被看作是一种模型的融合。融合多个神经机器翻译系统的方法有很多,可以分为如下几类。
%----------------------------------------------------------------------------------------
% NEW SUBSUB-SECTION
%----------------------------------------------------------------------------------------
\subsection{假设选择}
\parinterval {\small\sffamily\bfseries{假设选择}}\index{假设选择}(Hypothesis Selection)\index{Hypothesis Selection}是最简单的系统融合方法\upcite{DBLP:conf/emnlp/DuanLXZ09}。其思想是:给定一个翻译假设集合,综合多个模型对每一个翻译假设进行打分,之后选择得分最高的假设作为结果输出。其中包含两方面问题:
\begin{itemize}
\vspace{0.5em}
\item 假设生成。构建翻译假设集合是假设选择的第一步,也是最重要的一步。理想的情况下,我们希望这个集合尽可能包含更多高质量的翻译假设,这样后面有更大的几率选出更好的结果。不过,由于单个模型的性能是有上限的,因此无法期望这些翻译假设的品质超越单个模型的上限。研究人员更加关心的是翻译假设的多样性,因为已经证明多样的翻译假设非常有助于提升系统融合的性能\upcite{DBLP:journals/corr/LiMJ16,xiao2013bagging}。为了生成多样的翻译假设,通常有两种思路:1)使用不同的模型生成翻译假设;2)使用同一个模型的不同参数和设置生成翻译假设。图\ref{fig:14-8}展示了二者的区别。
%----------------------------------------------
\begin{figure}[htp]
\centering
\input{./Chapter14/Figures/figure-hypothesis-generation}
\caption{多模型翻译假设生成 vs 单模型翻译假设生成}
\label{fig:14-8}
\end{figure}
%----------------------------------------------
\noindent 比如,可以使用基于RNN 的模型和Transformer 模型生成不同的翻译假设,之后都放入集合中;也可以只用Transformer 模型,但是用不同的模型参数构建多个系统,之后分别生成翻译假设。在神经机器翻译中,经常采用的是第二种方式,因为系统开发的成本更低。比如,很多研究工作都是基于一个基础模型,用不同的初始参数、不同层数、不同解码方式生成多个模型进行翻译假设生成。
\vspace{0.5em}
\item 选择模型。所谓假设选择实际上就是要用一个更强的模型在候选中进行选择。这个“强” 模型一般是由更多、更复杂的子模型组合而成。常用的方法是直接使用翻译假设生成时的模型构建“强” 模型。比如,我们使用了两个模型生成了翻译假设集合,之后对所有翻译假设都分别用这两个模型进行打分。最后,综合两个模型的打分(如线性插值)得到翻译假设的最终得分,并进行选择。当然,也可以使用更强大的统计模型对多个子模型进行组合(如使用多层神经网络)。
\vspace{0.5em}
\end{itemize}
\parinterval 假设选择也可以被看作是一种简单的投票模型。对所有的候选用多个模型投票,选出最好的结果输出。包括重排序在内的很多方法也是假设选择的一种特例。比如,在重排序中,可以把生成$n$-best列表的过程看作是翻译假设生成过程,而重排序的过程可以被看作是融合多个子模型进行最终结果选择的过程。
%----------------------------------------------------------------------------------------
% NEW SUBSUB-SECTION
%----------------------------------------------------------------------------------------
\subsection{局部预测融合}
\parinterval 神经机器翻译模型对每个目标端位置$j$的单词分布进行预测,即对于目标语言词汇表中的每个单词$y_j$,需要计算$\funp{P}(y_j | \seq{y}_{<j},\seq{x})$。假设有$K$个神经机器翻译系统,那么每个系统$k$都可以独立的计算这个概率,记为$\funp{P}_{k} (y_j | \seq{y}_{<j},\seq{x})$。于是,可以用公式\eqref{eq:14-11}融合这$K$个系统的预测:
\begin{eqnarray}
\funp{P}(y_{j} | \seq{y}_{<j},\seq{x}) = \sum_{k=1}^K \gamma_{k} \cdot \funp{P}_{k} (y_j | \seq{y}_{<j},\seq{x})
\label{eq:14-11}
\end{eqnarray}
\parinterval Here $\gamma_{k}$ denotes the weight of the $k$-th system, with $\sum_{k=1}^{K} \gamma_{k} = 1$. Equation~\eqref{eq:14-11} is a linear model. The weights $\{ \gamma_{k}\}$ can be tuned automatically on a development set, for example with minimum error rate training (see \chapterfour). In practice, however, if the $K$ models are all derived from the same base model, the weights $\{ \gamma_{k}\}$ have little influence on the final result, so they are sometimes simply set to $\gamma_{k} = \frac{1}{K}$. Figure~\ref{fig:14-9} shows an ensemble of the predictions of three models.
%----------------------------------------------
\begin{figure}[htp]
\centering
\input{./Chapter14/Figures/figure-forecast-result}
\caption{Ensemble of the predictions of three models}
\label{fig:14-9}
\end{figure}
%----------------------------------------------
\parinterval Equation~\eqref{eq:14-11} is a typical linear interpolation model, and such models have been applied successfully to tasks such as language modeling. From the perspective of statistical learning, interpolating multiple models can effectively reduce the empirical error rate. However, multi-model ensembling relies on the assumption that the models are to some extent complementary. This complementarity is sometimes reflected in the upper bound of the models' predictions, called the Oracle. For example, the output with the highest BLEU among the $K$ models can be taken as the Oracle, or one can pick, from every prediction, the translation words that maximize BLEU and compose them into an Oracle sentence. Of course, a higher Oracle does not guarantee a better ensemble result: the Oracle is the result in the most ideal case, while actual predictions often differ greatly from it. How to use the Oracle for model optimization is a question that many researchers are still exploring.
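\parinterval As an illustration of Equation~\eqref{eq:14-11}, the sketch below performs greedy decoding while linearly interpolating the per-position distributions of the member models. The \texttt{next\_distribution} interface is a hypothetical placeholder for one forward step of a model; in practice the same interpolation is applied inside beam search rather than greedy search.
\begin{verbatim}
import numpy as np

def fused_greedy_decode(models, gammas, src, eos_id, max_len=100):
    prefix = []
    for _ in range(max_len):
        # one forward step per member model, then linear interpolation
        dists = [m.next_distribution(src, prefix) for m in models]
        fused = sum(g * np.asarray(d) for g, d in zip(gammas, dists))
        y = int(np.argmax(fused))
        prefix.append(y)
        if y == eos_id:
            break
    return prefix
\end{verbatim}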
\parinterval In addition, how the member models themselves are built is also very important; this may even be the most difficult part of model ensembling \upcite{DBLP:conf/wmt/LiLXLLLWZXWFCLL19,Sun2019BaiduNM,Wang2018TencentNM,DBLP:conf/wmt/SennrichHB16,Bi2019MultiagentLF}. Common ways to increase model diversity include:
\begin{itemize}
\vspace{0.5em}
\item Vary the model width and depth, i.e., obtain multiple models with different numbers of layers or different hidden-layer sizes;
\vspace{0.5em}
\item Use different parameter initializations, i.e., train multiple models with different random seeds for parameter initialization;
\vspace{0.5em}
\item Adjust (parts of) the model architecture, for example, using different positional encoding schemes \upcite{Shaw2018SelfAttentionWR} or multi-layer fusion models \upcite{WangLearning};
\vspace{0.5em}
\item Train models on pseudo data of different sizes produced by different data augmentation methods (references: this year's reports);
\vspace{0.5em}
\item Use multi-branch, multi-channel models, where different branches may have different structures, giving the model stronger representational power (references: this year's reports);
\vspace{0.5em}
\item Use models that share parameters through pre-training and are then fine-tuned;
\vspace{0.5em}
\end{itemize}
%----------------------------------------------------------------------------------------
% NEW SUBSUB-SECTION
%----------------------------------------------------------------------------------------
\subsection{Translation Recombination}
\parinterval Hypothesis selection chooses directly from translations that have already been generated, so it cannot produce ``new'' translations: its output is always the output of some single model. Prediction fusion, on the other hand, requires running multiple models at inference time, which is costly in computation and memory. Moreover, both methods share a common problem: the search operates on individual strings, which cover only a tiny fraction of the exponentially large translation space. One way to address this is to represent the exponentially many translation strings with a more compact data structure. For example, a lattice can be used to represent multiple translation strings \upcite{DBLP:conf/emnlp/TrombleKOM08}. Figure~\ref{fig:14-10} shows the difference between the $n$-best-string representation and the lattice representation. As can be seen, every path from the start state to the end state of the lattice represents a translation, and parts shared by different translations are merged at lattice nodes\footnote{The lattice in this example is also a Confusion Network.}. In theory, a lattice can represent an exponential number of word strings with a structure of linear complexity.
%----------------------------------------------------------------------
\begin{figure}[htp]
\centering
\input{./Chapter14/Figures/figure-word-string-representation}
\caption{$n$-best word-string representation vs. lattice representation}
\label{fig:14-10}
\end{figure}
%----------------------------------------------------------------------
\parinterval With a structure like the lattice, multi-model combination gains a new perspective. First, the translations of multiple models are merged into a lattice. Note that this lattice will contain complete translation sentences that none of the individual models could generate. Then, a stronger model searches the lattice for the best result. This process may find ``new'' translations, i.e., the result may be recombined from pieces of the outputs of several models. The search model on the lattice can itself be a fusion of multiple models, or simply a single model; the main issue is how to adapt a neural machine translation model to perform inference on a lattice \upcite{DBLP:conf/aaai/SuTXJSL17}. The procedure is essentially the same as standard inference, except that the model's predictions are attached to every edge of the lattice before the search is carried out.
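\parinterval The search over a lattice can be sketched as a simple dynamic program over a directed acyclic graph. The fragment below is a minimal illustration rather than the method of the cited work: \texttt{edges[u]} is assumed to list \texttt{(v, word, weight)} transitions, where the weight stands for the (log-probability) score that the rescoring model attaches to the edge, and the best-scoring path from the start node to the end node is returned.
\begin{verbatim}
def best_lattice_path(edges, start, end, topo_order):
    # best[u] = (score of the best path reaching u, words along that path)
    best = {start: (0.0, [])}
    for u in topo_order:
        if u not in best:
            continue
        score_u, path_u = best[u]
        for v, word, weight in edges.get(u, []):
            cand = (score_u + weight, path_u + [word])
            if v not in best or cand[0] > best[v][0]:
                best[v] = cand
    return best.get(end, (float("-inf"), []))[1]
\end{verbatim}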
\parinterval Figure~\ref{fig:14-11} compares the different model ensembling methods. From the viewpoint of system development, hypothesis selection and prediction fusion have low complexity, are suitable for rapid prototyping, and deliver stable performance. Translation recombination requires more components and is harder to debug, but because it sees a larger search space, it has greater potential for improving system performance\footnote{In general, the Oracle on a lattice is of higher quality than the Oracle on an $n$-best list.}.
%----------------------------------------------------------------------
\begin{figure}[htp]
\centering
\input{./Chapter14/Figures/figure-different-integration-model}
\caption{Comparison of different model ensembling methods}
\label{fig:14-11}
\end{figure}
%----------------------------------------------------------------------
%----------------------------------------------------------------------------------------
% NEW SECTION
%----------------------------------------------------------------------------------------
\section{Summary and Further Reading}
\begin{itemize}
\vspace{0.5em}
\item Inference in machine translation systems also borrows the notion of {\small\sffamily\bfseries{Statistical Inference}}\index{Statistical Inference}. Traditionally, such methods use sample data to infer the trends and characteristics of a population, and the statistical perspective offers many different approaches. For example, Bayesian learning has been widely used in natural language processing \upcite{Held2013AppliedSI,Silvey2018StatisticalI}. A representative family is {\small\sffamily\bfseries{Variational Methods}}\index{Variational Methods}, which model the distribution of the data by introducing new latent variables; in a sense they describe ``the distribution of distributions'' and can therefore characterize statistical regularities in finer detail \upcite{Beal2003VariationalAF}. These methods have been applied successfully to both statistical machine translation \upcite{Li2009VariationalDF,xiao2011language} and neural machine translation \upcite{Bastings2019ModelingLS,Shah2018GenerativeNM,Su2018VariationalRN,Zhang2016VariationalNM}.
\vspace{0.5em}
\item More efficient network structures are a major development trend for neural machine translation models; current work concentrates on structured pruning, reducing redundant computation, and low-rank decomposition. A representative structured-pruning approach is LayerDrop \upcite{DBLP:conf/iclr/FanGJ20,DBLP:conf/emnlp/WangXZ20,DBLP:journals/corr/abs-2002-02925}, which randomly selects layers during training and, at inference time, chooses a subset of layers to compute based on the input while skipping the rest, thereby accelerating inference and reducing the number of parameters. Work on reducing redundant computation mainly focuses on improving the attention mechanism, which has already been discussed in the body of this chapter. Low-rank decomposition targets the word embeddings or the attention projection matrices, e.g., frequency-adaptive representations \upcite{DBLP:conf/iclr/BaevskiA19}, in which more frequent words receive higher-dimensional vectors and less frequent words lower-dimensional ones, or attention projection matrices whose dimensionality decreases in higher layers \upcite{DBLP:journals/corr/abs-2006-04768,DBLP:journals/corr/abs-1911-12385,DBLP:journals/corr/abs-1906-09777,DBLP:conf/nips/YangLSL19}. In practice, a particularly effective recipe is to combine a deep encoder with a shallow decoder: in the extreme case, a single-layer decoder can reach translation quality comparable to multi-layer decoders while greatly improving translation efficiency \upcite{DBLP:journals/corr/abs-2006-10369,DBLP:conf/aclnmt/HuLLLLWXZ20,DBLP:journals/corr/abs-2010-02416}.
\vspace{0.5em}
\item When machine translation systems are actually deployed, storage consumption must also be taken into account, so making models smaller is another direction that developers care about. Current model compression methods fall into several categories: pruning, quantization, knowledge distillation, and lightweight design, where the last refers to designing lighter model structures and has been introduced above. Pruning includes weight-magnitude pruning \upcite{Han2015LearningBW,Lee2019SNIPSN,Frankle2019TheLT,Brix2020SuccessfullyAT}, attention-head pruning \upcite{Michel2019AreSH,DBLP:journals/corr/abs-1905-09418}, layer pruning \upcite{Liu2019RethinkingTV}, and pruning of other components \upcite{Liu2017LearningEC}; some methods also apply regularization during training to improve prunability \upcite{DBLP:conf/iclr/FanGJ20}. Quantization reduces storage by truncating floating-point numbers so that only a few bits are needed to store the whole model; despite the rounding error it introduces, the compression effect is substantial \upcite{DBLP:journals/corr/abs-1906-00532,Cheong2019transformersZ,Banner2018ScalableMF,Hubara2017QuantizedNN} (a minimal sketch of weight quantization is given after this list). Knowledge distillation, also called knowledge refinement, has even been used to distill BERT into architectures with faster inference such as LSTMs \upcite{DBLP:journals/corr/HintonVD15,Munim2019SequencelevelKD,Tang2019DistillingTK}. Some other methods mine the ``teacher model'' more deeply, not only at the output level but also in the weight matrices and hidden activations \upcite{Jiao2020TinyBERTDB}.
\vspace{0.5em}
\item Current translation models are optimized with a cross-entropy loss, which works very well for autoregressive models. Cross-entropy is a strict loss function: any word predicted out of position is penalized, even if the output sequence is only a small edit distance away from the reference. Autoregressive models largely avoid this penalty because each word is generated conditioned on the preceding words of the sentence, information that non-autoregressive models cannot access. Some work therefore improves the loss function to boost non-autoregressive performance. One approach proposes an aligned cross-entropy loss \upcite{Ghazvininejad2020AlignedCE}, which computes the cross-entropy based on an alignment between the label sequence and the sequence of predicted target-word distributions, using dynamic programming to find the monotonic alignment that minimizes the loss. Another approach proposes a training objective based on bags of $n$-grams \upcite{Shao2020MinimizingTB}, which minimizes the difference between the bag of $n$-grams of the model output and that of the reference. Because this objective evaluates predictions at the $n$-gram level, it can model sequential dependencies.
\vspace{0.5em}
\item When an autoregressive model predicts the target sentence, each word is generated conditioned on previously generated words, which provide strong target-side context. Non-autoregressive models, however, generate all words in parallel, so this information is unavailable: compared with autoregressive models, their decoders must translate with less information. Many approaches therefore inject additional information into the decoder of non-autoregressive models to shrink the search space. Some work \upcite{Ma2019FlowSeqNC} introduces conditional random fields into non-autoregressive models to capture structural dependencies; another approach introduces a word-embedding transformation matrix that maps source-side embeddings into target-side embeddings to enrich the decoder input \upcite{Guo2019NonAutoregressiveNM}; yet another introduces a lightweight reordering module that explicitly models reordering information to guide non-autoregressive decoding \upcite{Ran2019GuidingNN}.
\vspace{0.5em}
\end{itemize}
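\parinterval As referenced in the discussion of model compression above, the following is a minimal sketch of post-training 8-bit weight quantization: each weight matrix is stored as \texttt{int8} values plus a single floating-point scale and dequantized on the fly at inference time. This is a generic illustration rather than the scheme of any particular cited paper.
\begin{verbatim}
import numpy as np

def quantize_int8(w):
    # symmetric quantization: one scale per weight matrix
    max_abs = float(np.abs(w).max())
    scale = max_abs / 127.0 if max_abs > 0 else 1.0
    q = np.clip(np.round(w / scale), -127, 127).astype(np.int8)
    return q, scale

def dequantize(q, scale):
    return q.astype(np.float32) * scale

w = np.random.randn(4, 4).astype(np.float32)
q, s = quantize_int8(w)
print(np.abs(w - dequantize(q, s)).max())   # small rounding error
\end{verbatim}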
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%% chapter 14------------------------------------------------------
@inproceedings{Koehn2007Moses,
author = {Philipp Koehn and
Hieu Hoang and
Alexandra Birch and
Chris Callison-Burch and
Marcello Federico and
Nicola Bertoldi and
Brooke Cowan and
Wade Shen and
Christine Moran and
Richard Zens and
Chris Dyer and
Ondrej Bojar and
Alexandra Constantin and
Evan Herbst},
title = {Moses: Open Source Toolkit for Statistical Machine Translation},
publisher = {Annual Meeting of the Association for Computational Linguistics},
year = {2007}
}
@inproceedings{DBLP:conf/amta/Koehn04,
author = {Philipp Koehn},
title = {Pharaoh: {A} Beam Search Decoder for Phrase-Based Statistical Machine
Translation Models},
volume = {3265},
pages = {115--124},
publisher = {Springer},
year = {2004}
}
@inproceedings{DBLP:conf/emnlp/StahlbergHSB17,
author = {Felix Stahlberg and
Eva Hasler and
Danielle Saunders and
Bill Byrne},
title = {SGNMT - A Flexible NMT Decoding Platform for Quick Prototyping
of New Models and Search Strategies},
booktitle = {Proceedings of the 2017 Conference on Empirical Methods in Natural
Language Processing, {EMNLP} 2017, Copenhagen, Denmark, September
9-11, 2017 - System Demonstrations},
pages = {25--30},
publisher = {Association for Computational Linguistics},
year = {2017}
}
@inproceedings{Liu2016AgreementOT,
title={Agreement on Target-bidirectional Neural Machine Translation},
author={L. Liu and M. Utiyama and A. Finch and Eiichiro Sumita},
booktitle={HLT-NAACL},
year={2016}
}
@inproceedings{DBLP:conf/wmt/LiLXLLLWZXWFCLL19,
author = {Bei Li and
Yinqiao Li and
Chen Xu and
Ye Lin and
Jiqiang Liu and
Hui Liu and
Ziyang Wang and
Yuhao Zhang and
Nuo Xu and
Zeyang Wang and
Kai Feng and
Hexuan Chen and
Tengbo Liu and
Yanyang Li and
Qiang Wang and
Tong Xiao and
Jingbo Zhu},
title = {The NiuTrans Machine Translation Systems for {WMT19}},
booktitle = {Proceedings of the Fourth Conference on Machine Translation, {WMT}
2019, Florence, Italy, August 1-2, 2019 - Volume 2: Shared Task Papers,
Day 1},
pages = {257--266},
publisher = {Association for Computational Linguistics},
year = {2019}
}
@inproceedings{DBLP:conf/wmt/SennrichHB16,
author = {Rico Sennrich and
Barry Haddow and
Alexandra Birch},
title = {Edinburgh Neural Machine Translation Systems for {WMT} 16},
booktitle = {Proceedings of the First Conference on Machine Translation, {WMT}
2016, colocated with {ACL} 2016, August 11-12, Berlin, Germany},
pages = {371--376},
publisher = {The Association for Computer Linguistics},
year = {2016}
}
@article{Stahlberg2018TheUO,
title={The University of Cambridge's Machine Translation Systems for WMT18},
author={Felix Stahlberg and A. Gispert and B. Byrne},
journal={ArXiv},
year={2018},
volume={abs/1808.09465}
}
@inproceedings{DBLP:conf/aaai/ZhangSQLJW18,
author = {Xiangwen Zhang and
Jinsong Su and
Yue Qin and
Yang Liu and
Rongrong Ji and
Hongji Wang},
title = {Asynchronous Bidirectional Decoding for Neural Machine Translation},
booktitle = {Proceedings of the Thirty-Second {AAAI} Conference on Artificial Intelligence,
(AAAI-18), the 30th innovative Applications of Artificial Intelligence
(IAAI-18), and the 8th {AAAI} Symposium on Educational Advances in
Artificial Intelligence (EAAI-18), New Orleans, Louisiana, USA, February
2-7, 2018},
pages = {5698--5705},
publisher = {{AAAI} Press},
year = {2018}
}
@article{Li2017EnhancedNM,
title={Enhanced neural machine translation by learning from draft},
author={A. Li and Shiyue Zhang and D. Wang and T. Zheng},
journal={2017 Asia-Pacific Signal and Information Processing Association Annual Summit and Conference (APSIPA ASC)},
year={2017},
pages={1583-1587}
}
@inproceedings{ElMaghraby2018EnhancingTF,
title={Enhancing Translation from English to Arabic Using Two-Phase Decoder Translation},
author={Ayah ElMaghraby and Ahmed Rafea},
booktitle={IntelliSys},
year={2018}
}
@inproceedings{Geng2018AdaptiveMD,
title={Adaptive Multi-pass Decoder for Neural Machine Translation},
author={X. Geng and X. Feng and B. Qin and T. Liu},
booktitle={EMNLP},
year={2018}
}
@article{Lee2018DeterministicNN,
title={Deterministic Non-Autoregressive Neural Sequence Modeling by Iterative Refinement},
author={Jason Lee and Elman Mansimov and Kyunghyun Cho},
journal={ArXiv},
year={2018},
volume={abs/1802.06901}
}
@inproceedings{Gu2019LevenshteinT,
title={Levenshtein Transformer},
author={Jiatao Gu and Changhan Wang and Jake Zhao},
booktitle={NeurIPS},
year={2019}
}
@inproceedings{Guo2020JointlyMS,
title={Jointly Masked Sequence-to-Sequence Model for Non-Autoregressive Neural Machine Translation},
author={Junliang Guo and Linli Xu and E. Chen},
booktitle={ACL},
year={2020}
}
@article{Stahlberg2018AnOS,
title={An Operation Sequence Model for Explainable Neural Machine Translation},
author={Felix Stahlberg and Danielle Saunders and B. Byrne},
journal={ArXiv},
year={2018},
volume={abs/1808.09688}
}
@inproceedings{Stern2019InsertionTF,
title={Insertion Transformer: Flexible Sequence Generation via Insertion Operations},
author={Mitchell Stern and William Chan and J. Kiros and Jakob Uszkoreit},
booktitle={ICML},
year={2019}
}
@article{stling2017NeuralMT,
title={Neural machine translation for low-resource languages},
author={Robert {\"O}stling and J. Tiedemann},
journal={ArXiv},
year={2017},
volume={abs/1708.05729}
}
@article{Kikuchi2016ControllingOL,
title={Controlling Output Length in Neural Encoder-Decoders},
author={Yuta Kikuchi and Graham Neubig and Ryohei Sasano and H. Takamura and M. Okumura},
journal={ArXiv},
year={2016},
volume={abs/1609.09552}
}
@inproceedings{Takase2019PositionalET,
title={Positional Encoding to Control Output Sequence Length},
author={S. Takase and N. Okazaki},
booktitle={NAACL-HLT},
year={2019}
}
@inproceedings{Murray2018CorrectingLB,
title={Correcting Length Bias in Neural Machine Translation},
author={Kenton Murray and David Chiang},
booktitle={WMT},
year={2018}
}
@article{Sountsov2016LengthBI,
title={Length bias in Encoder Decoder Models and a Case for Global Conditioning},
author={Pavel Sountsov and Sunita Sarawagi},
journal={ArXiv},
year={2016},
volume={abs/1606.03402}
}
@inproceedings{Jean2015MontrealNM,
title={Montreal Neural Machine Translation Systems for WMT'15},
author={S. Jean and Orhan Firat and Kyunghyun Cho and R. Memisevic and Yoshua Bengio},
booktitle={WMT@EMNLP},
year={2015}
}
@inproceedings{Yang2018OtemUtemOA,
title={Otem{\&}Utem: Over- and Under-Translation Evaluation Metric for NMT},
author={J. Yang and Biao Zhang and Yue Qin and Xiangwen Zhang and Q. Lin and Jinsong Su},
booktitle={NLPCC},
year={2018}
}
@inproceedings{Mi2016CoverageEM,
title={Coverage Embedding Models for Neural Machine Translation},
author={Haitao Mi and B. Sankaran and Z. Wang and Abe Ittycheriah},
booktitle={EMNLP},
year={2016}
}
@article{Kazimi2017CoverageFC,
title={Coverage for Character Based Neural Machine Translation},
author={M. Kazimi and Marta R. Costa-juss{\`a}},
journal={Proces. del Leng. Natural},
year={2017},
volume={59},
pages={99-106}
}
@inproceedings{DBLP:conf/emnlp/HuangZM17,
author = {Liang Huang and
Kai Zhao and
Mingbo Ma},
title = {When to Finish? Optimal Beam Search for Neural Text Generation (modulo
beam size)},
pages = {2134--2139},
publisher = {Annual Meeting of the Association for Computational Linguistics},
year = {2017}
}
@inproceedings{Wiseman2016SequencetoSequenceLA,
title={Sequence-to-Sequence Learning as Beam-Search Optimization},
author={Sam Wiseman and Alexander M. Rush},
booktitle={EMNLP},
year={2016}
}
@inproceedings{DBLP:conf/emnlp/Yang0M18,
author = {Yilin Yang and
Liang Huang and
Mingbo Ma},
title = {Breaking the Beam Search Curse: {A} Study of (Re-)Scoring Methods
and Stopping Criteria for Neural Machine Translation},
pages = {3054--3059},
publisher = {Annual Meeting of the Association for Computational Linguistics},
year = {2018}
}
@article{Ma2019LearningTS,
title={Learning to Stop in Structured Prediction for Neural Machine Translation},
author={M. Ma and Renjie Zheng and Liang Huang},
journal={ArXiv},
year={2019},
volume={abs/1904.01032}
}
@inproceedings{KleinOpenNMT,
author = {Guillaume Klein and
Yoon Kim and
Yuntian Deng and
Jean Senellart and
Alexander M. Rush},
title = {OpenNMT: Open-Source Toolkit for Neural Machine Translation},
pages = {67--72},
publisher = {Annual Meeting of the Association for Computational Linguistics},
year = {2017}
}
@inproceedings{bahdanau2014neural,
author = {Dzmitry Bahdanau and
Kyunghyun Cho and
Yoshua Bengio},
title = {Neural Machine Translation by Jointly Learning to Align and Translate},
publisher = {International Conference on Learning Representations},
year = {2015}
}
@inproceedings{Eisner2011LearningST,
title={Learning Speed-Accuracy Tradeoffs in Nondeterministic Inference Algorithms},
author={J. Eisner and Hal Daum{\'e}},
year={2011}
}
@inproceedings{Jiang2012LearnedPF,
title={Learned Prioritization for Trading Off Accuracy and Speed},
author={J. Jiang and Adam R. Teichert and Hal Daum{\'e} and J. Eisner},
booktitle={NIPS},
year={2012}
}
@inproceedings{Zheng2020OpportunisticDW,
title={Opportunistic Decoding with Timely Correction for Simultaneous Translation},
author={Renjie Zheng and M. Ma and Baigong Zheng and Kaibo Liu and Liang Huang},
booktitle={ACL},
year={2020}
}
@inproceedings{Ma2019STACLST,
title={STACL: Simultaneous Translation with Implicit Anticipation and Controllable Latency using Prefix-to-Prefix Framework},
author={M. Ma and L. Huang and Hao Xiong and Renjie Zheng and Kaibo Liu and Baigong Zheng and Chuanqiang Zhang and Zhongjun He and Hairong Liu and X. Li and H. Wu and Haifeng Wang},
booktitle={ACL},
year={2019}
}
@inproceedings{Gimpel2013ASE,
title={A Systematic Exploration of Diversity in Machine Translation},
author={Kevin Gimpel and Dhruv Batra and Chris Dyer and Gregory Shakhnarovich},
booktitle={EMNLP},
year={2013}
}
@article{Li2016MutualIA,
title={Mutual Information and Diverse Decoding Improve Neural Machine Translation},
author={J. Li and Dan Jurafsky},
journal={ArXiv},
year={2016},
volume={abs/1601.00372}
}
@inproceedings{Li2016ADO,
title={A Diversity-Promoting Objective Function for Neural Conversation Models},
author={J. Li and Michel Galley and Chris Brockett and Jianfeng Gao and W. Dolan},
booktitle={HLT-NAACL},
year={2016}
}
@inproceedings{He2018SequenceTS,
title={Sequence to Sequence Mixture Model for Diverse Machine Translation},
author={Xuanli He and Gholamreza Haffari and Mohammad Norouzi},
booktitle={CoNLL},
year={2018}
}
@article{Shen2019MixtureMF,
title={Mixture Models for Diverse Machine Translation: Tricks of the Trade},
author={Tianxiao Shen and Myle Ott and M. Auli and Marc'Aurelio Ranzato},
journal={ArXiv},
year={2019},
volume={abs/1902.07816}
}
@article{Wu2020GeneratingDT,
title={Generating Diverse Translation from Model Distribution with Dropout},
author={Xuanfu Wu and Yang Feng and Chenze Shao},
journal={ArXiv},
year={2020},
volume={abs/2010.08178}
}
@inproceedings{Sun2020GeneratingDT,
title={Generating Diverse Translation by Manipulating Multi-Head Attention},
author={Zewei Sun and Shujian Huang and Hao-Ran Wei and Xin-Yu Dai and Jiajun Chen},
booktitle={AAAI},
year={2020}
}
@article{Vijayakumar2016DiverseBS,
title={Diverse Beam Search: Decoding Diverse Solutions from Neural Sequence Models},
author={Ashwin K. Vijayakumar and Michael Cogswell and R. R. Selvaraju and Q. Sun and Stefan Lee and David J. Crandall and Dhruv Batra},
journal={ArXiv},
year={2016},
volume={abs/1610.02424}
}
@inproceedings{Liu2014SearchAwareTF,
title={Search-Aware Tuning for Machine Translation},
author={L. Liu and Liang Huang},
booktitle={EMNLP},
year={2014}
}
@inproceedings{Yu2013MaxViolationPA,
title={Max-Violation Perceptron and Forced Decoding for Scalable MT Training},
author={Heng Yu and Liang Huang and Haitao Mi and Kai Zhao},
booktitle={EMNLP},
year={2013}
}
@inproceedings{Stahlberg2019OnNS,
title={On NMT Search Errors and Model Errors: Cat Got Your Tongue?},
author={Felix Stahlberg and
B. Byrne},
booktitle={EMNLP/IJCNLP},
year={2019}
}
@inproceedings{Niehues2017AnalyzingNM,
title={Analyzing Neural MT Search and Model Performance},
author={J. Niehues and Eunah Cho and Thanh-Le Ha and Alexander H. Waibel},
booktitle={NMT@ACL},
year={2017}
}
@article{StahlbergNeural,
title={Neural Machine Translation: A Review},
author={Felix Stahlberg},
journal={Journal of Artificial Intelligence Research},
year={2020},
volume={69},
pages={343-418}
}
@article{Ranzato2016SequenceLT,
title={Sequence Level Training with Recurrent Neural Networks},
author={Marc'Aurelio Ranzato and S. Chopra and M. Auli and W. Zaremba},
journal={CoRR},
year={2016},
volume={abs/1511.06732}
}
@article{Bengio2015ScheduledSF,
title={Scheduled Sampling for Sequence Prediction with Recurrent Neural Networks},
author={S. Bengio and Oriol Vinyals and Navdeep Jaitly and Noam Shazeer},
journal={ArXiv},
year={2015},
volume={abs/1506.03099}
}
@article{Zhang2019BridgingTG,
title={Bridging the Gap between Training and Inference for Neural Machine Translation},
author={Wen Zhang and Y. Feng and Fandong Meng and Di You and Qun Liu},
journal={ArXiv},
year={2019},
volume={abs/1906.02448}
}
@inproceedings{DBLP:conf/acl/ShenCHHWSL16,
author = {Shiqi Shen and
Yong Cheng and
Zhongjun He and
Wei He and
Hua Wu and
Maosong Sun and
Yang Liu},
title = {Minimum Risk Training for Neural Machine Translation},
publisher = {Annual Meeting of the Association for Computational Linguistics},
year = {2016},
}
@article{Gage1994ANA,
title={A new algorithm for data compression},
author={P. Gage},
journal={The C Users Journal archive},
year={1994},
volume={12},
pages={23-38}
}
@inproceedings{DBLP:conf/acl/SennrichHB16a,
author = {Rico Sennrich and
Barry Haddow and
Alexandra Birch},
title = {Neural Machine Translation of Rare Words with Subword Units},
publisher = {Annual Meeting of the Association for Computational Linguistics},
year = {2016},
}
@inproceedings{DBLP:conf/emnlp/ZensSX12,
author = {Richard Zens and
Daisy Stanton and
Peng Xu},
title = {A Systematic Comparison of Phrase Table Pruning Techniques},
pages = {972--983},
publisher = {Annual Meeting of the Association for Computational Linguistics},
year = {2012}
}
@inproceedings{DBLP:conf/emnlp/JohnsonMFK07,
author = {Howard Johnson and
Joel D. Martin and
George F. Foster and
Roland Kuhn},
title = {Improving Translation Quality by Discarding Most of the Phrasetable},
pages = {967--975},
publisher = {Annual Meeting of the Association for Computational Linguistics},
year = {2007}
}
@inproceedings{DBLP:conf/emnlp/LingGTB12,
author = {Wang Ling and
Jo{\~{a}}o Gra{\c{c}}a and
Isabel Trancoso and
Alan W. Black},
title = {Entropy-based Pruning for Phrase-based Machine Translation},
pages = {962--971},
publisher = {Annual Meeting of the Association for Computational Linguistics},
year = {2012}
}
@article{Narang2017BlockSparseRN,
title={Block-Sparse Recurrent Neural Networks},
author={Sharan Narang and Eric Undersander and G. Diamos},
journal={ArXiv},
year={2017},
volume={abs/1711.02782}
}
@article{Gale2019TheSO,
title={The State of Sparsity in Deep Neural Networks},
author={T. Gale and E. Elsen and Sara Hooker},
journal={ArXiv},
year={2019},
volume={abs/1902.09574}
}
@article{Michel2019AreSH,
title={Are Sixteen Heads Really Better than One?},
author={Paul Michel and Omer Levy and Graham Neubig},
journal={ArXiv},
year={2019},
volume={abs/1905.10650}
}
@inproceedings{DBLP:journals/corr/abs-1905-09418,
author = {Elena Voita and
David Talbot and
Fedor Moiseev and
Rico Sennrich and
Ivan Titov},
title = {Analyzing Multi-Head Self-Attention: Specialized Heads Do the Heavy
Lifting, the Rest Can Be Pruned},
pages = {5797--5808},
publisher = {Annual Meeting of the Association for Computational Linguistics},
year = {2019},
}
@article{Kitaev2020ReformerTE,
author = {Nikita Kitaev and
Lukasz Kaiser and
Anselm Levskaya},
title = {Reformer: The Efficient Transformer},
journal = {International Conference on Learning Representations},
year = {2020}
}
@article{Katharopoulos2020TransformersAR,
title={Transformers are RNNs: Fast Autoregressive Transformers with Linear Attention},
author={Angelos Katharopoulos and Apoorv Vyas and Nikolaos Pappas and Franccois Fleuret},
journal={ArXiv},
year={2020},
volume={abs/2006.16236}
}
@inproceedings{Beal2003VariationalAF,
title={Variational algorithms for approximate Bayesian inference},
author={M. Beal},
year={2003}
}
@article{xiao2011language,
title ={Language Modeling for Syntax-Based Machine Translation Using Tree Substitution Grammars: A Case Study on Chinese-English Translation},
author ={Xiao, Tong and Zhu, Jingbo and Zhu, Muhua},
volume ={10},
number ={4},
pages ={1--29},
year ={2011},
publisher ={ACM Transactions on Asian Language Information Processing (TALIP)}
}
@inproceedings{Li2009VariationalDF,
title={Variational Decoding for Statistical Machine Translation},
author={Zhifei Li and J. Eisner and S. Khudanpur},
booktitle={ACL/IJCNLP},
year={2009}
}
@article{Bastings2019ModelingLS,
title={Modeling Latent Sentence Structure in Neural Machine Translation},
author={Jasmijn Bastings and W. Aziz and Ivan Titov and K. Sima'an},
journal={ArXiv},
year={2019},
volume={abs/1901.06436}
}
@article{Shah2018GenerativeNM,
title={Generative Neural Machine Translation},
author={Harshil Shah and D. Barber},
journal={ArXiv},
year={2018},
volume={abs/1806.05138}
}
@article{Su2018VariationalRN,
title={Variational Recurrent Neural Machine Translation},
author={Jinsong Su and Shan Wu and Deyi Xiong and Yaojie Lu and Xianpei Han and Biao Zhang},
journal={ArXiv},
year={2018},
volume={abs/1801.05119}
}
@inproceedings{DBLP:journals/corr/GehringAGYD17,
author = {Jonas Gehring and
Michael Auli and
David Grangier and
Denis Yarats and
Yann N. Dauphin},
title = {Convolutional Sequence to Sequence Learning},
publisher = {International Conference on Machine Learning},
volume = {70},
pages = {1243--1252},
year = {2017}
}
@inproceedings{Wei2019ImitationLF,
title={Imitation Learning for Non-Autoregressive Neural Machine Translation},
author={Bingzhen Wei and Mingxuan Wang and Hao Zhou and Junyang Lin and Xu Sun},
booktitle={ACL},
year={2019}
}
@inproceedings{Shao2019RetrievingSI,
title={Retrieving Sequential Information for Non-Autoregressive Neural Machine Translation},
author={Chenze Shao and Y. Feng and J. Zhang and Fandong Meng and X. Chen and Jie Zhou},
booktitle={ACL},
year={2019}
}
@article{Akoury2019SyntacticallyST,
title={Syntactically Supervised Transformers for Faster Neural Machine Translation},
author={Nader Akoury and Kalpesh Krishna and Mohit Iyyer},
journal={ArXiv},
year={2019},
volume={abs/1906.02780}
}
@article{Guo2020FineTuningBC,
title={Fine-Tuning by Curriculum Learning for Non-Autoregressive Neural Machine Translation},
author={Junliang Guo and Xu Tan and Linli Xu and Tao Qin and E. Chen and T. Liu},
journal={ArXiv},
year={2020},
volume={abs/1911.08717}
}
@inproceedings{Ran2020LearningTR,
title={Learning to Recover from Multi-Modality Errors for Non-Autoregressive Neural Machine Translation},
author={Qiu Ran and Yankai Lin and Peng Li and J. Zhou},
booktitle={ACL},
year={2020}
}
@article{Liu2020FastBERTAS,
title={FastBERT: a Self-distilling BERT with Adaptive Inference Time},
author={Weijie Liu and P. Zhou and Zhe Zhao and Zhiruo Wang and Haotang Deng and Q. Ju},
journal={ArXiv},
year={2020},
volume={abs/2004.02178}
}
@article{Elbayad2020DepthAdaptiveT,
title={Depth-Adaptive Transformer},
author={Maha Elbayad and Jiatao Gu and E. Grave and M. Auli},
journal={ArXiv},
year={2020},
volume={abs/1910.10073}
}
@article{Lan2020ALBERTAL,
title={ALBERT: A Lite BERT for Self-supervised Learning of Language Representations},
author={Zhenzhong Lan and Mingda Chen and Sebastian Goodman and Kevin Gimpel and Piyush Sharma and Radu Soricut},
journal={ArXiv},
year={2020},
volume={abs/1909.11942}
}
@inproceedings{Han2015LearningBW,
title={Learning both Weights and Connections for Efficient Neural Network},
author={Song Han and J. Pool and John Tran and W. Dally},
booktitle={NIPS},
year={2015}
}
@article{Lee2019SNIPSN,
title={SNIP: Single-shot Network Pruning based on Connection Sensitivity},
author={N. Lee and Thalaiyasingam Ajanthan and P. Torr},
journal={ArXiv},
year={2019},
volume={abs/1810.02340}
}
@article{Frankle2019TheLT,
title={The Lottery Ticket Hypothesis: Finding Sparse, Trainable Neural Networks},
author={Jonathan Frankle and Michael Carbin},
journal={arXiv: Learning},
year={2019}
}
@article{Brix2020SuccessfullyAT,
title={Successfully Applying the Stabilized Lottery Ticket Hypothesis to the Transformer Architecture},
author={Christopher Brix and P. Bahar and H. Ney},
journal={ArXiv},
year={2020},
volume={abs/2005.03454}
}
@article{Liu2019RethinkingTV,
title={Rethinking the Value of Network Pruning},
author={Zhuang Liu and M. Sun and Tinghui Zhou and Gao Huang and Trevor Darrell},
journal={ArXiv},
year={2019},
volume={abs/1810.05270}
}
@article{Liu2017LearningEC,
title={Learning Efficient Convolutional Networks through Network Slimming},
author={Zhuang Liu and J. Li and Zhiqiang Shen and Gao Huang and S. Yan and C. Zhang},
journal={2017 IEEE International Conference on Computer Vision (ICCV)},
year={2017},
pages={2755-2763}
}
@inproceedings{Cheong2019transformersZ,
title={transformers.zip : Compressing Transformers with Pruning and Quantization},
author={Robin Cheong},
year={2019}
}
@inproceedings{Banner2018ScalableMF,
title={Scalable Methods for 8-bit Training of Neural Networks},
author={R. Banner and Itay Hubara and E. Hoffer and Daniel Soudry},
booktitle={NeurIPS},
year={2018}
}
@article{Hubara2017QuantizedNN,
title={Quantized Neural Networks: Training Neural Networks with Low Precision Weights and Activations},
author={Itay Hubara and Matthieu Courbariaux and Daniel Soudry and Ran El-Yaniv and Yoshua Bengio},
journal={J. Mach. Learn. Res.},
year={2017},
volume={18},
pages={187:1-187:30}
}
@article{DBLP:journals/corr/HintonVD15,
author = {Geoffrey E. Hinton and
Oriol Vinyals and
Jeffrey Dean},
title = {Distilling the Knowledge in a Neural Network},
journal = {CoRR},
volume = {abs/1503.02531},
year = {2015}
}
@article{Munim2019SequencelevelKD,
title={Sequence-level Knowledge Distillation for Model Compression of Attention-based Sequence-to-sequence Speech Recognition},
author={Raden Mu'az Mun'im and N. Inoue and Koichi Shinoda},
journal={ICASSP 2019 - 2019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
year={2019},
pages={6151-6155}
}
@article{Tang2019DistillingTK,
title={Distilling Task-Specific Knowledge from BERT into Simple Neural Networks},
author={Raphael Tang and Yao Lu and L. Liu and Lili Mou and Olga Vechtomova and Jimmy Lin},
journal={ArXiv},
year={2019},
volume={abs/1903.12136}
}
@inproceedings{Jiao2020TinyBERTDB,
title={TinyBERT: Distilling BERT for Natural Language Understanding},
author={Xiaoqi Jiao and Y. Yin and L. Shang and Xin Jiang and X. Chen and Linlin Li and F. Wang and Qun Liu},
booktitle={EMNLP},
year={2020}
}
@article{Ghazvininejad2020AlignedCE,
title={Aligned Cross Entropy for Non-Autoregressive Machine Translation},
author={Marjan Ghazvininejad and V. Karpukhin and Luke Zettlemoyer and Omer Levy},
journal={ArXiv},
year={2020},
volume={abs/2004.01655}
}
@inproceedings{Shao2020MinimizingTB,
title={Minimizing the Bag-of-Ngrams Difference for Non-Autoregressive Neural Machine Translation},
author={Chenze Shao and Jinchao Zhang and Yun-jie Feng and Fandong Meng and Jie Zhou},
booktitle={AAAI},
year={2020}
}
@inproceedings{Ma2019FlowSeqNC,
title={FlowSeq: Non-Autoregressive Conditional Sequence Generation with Generative Flow},
author={Xuezhe Ma and Chunting Zhou and X. Li and Graham Neubig and E. Hovy},
booktitle={EMNLP/IJCNLP},
year={2019}
}
@inproceedings{Guo2019NonAutoregressiveNM,
title={Non-Autoregressive Neural Machine Translation with Enhanced Decoder Input},
author={Junliang Guo and X. Tan and D. He and T. Qin and Linli Xu and T. Liu},
booktitle={AAAI},
year={2019}
}
@article{Ran2019GuidingNN,
title={Guiding Non-Autoregressive Neural Machine Translation Decoding with Reordering Information},
author={Qiu Ran and Yankai Lin and Peng Li and J. Zhou},
journal={ArXiv},
year={2019},
volume={abs/1911.02215}
}
@inproceedings{vaswani2017attention,
title={Attention is All You Need},
author={Ashish {Vaswani} and Noam {Shazeer} and Niki {Parmar} and Jakob {Uszkoreit} and Llion {Jones} and Aidan N. {Gomez} and Lukasz {Kaiser} and Illia {Polosukhin}},
publisher={International Conference on Neural Information Processing},
pages={5998--6008},
year={2017}
}
@inproceedings{Gu2017NonAutoregressiveNM,
author = {Jiatao Gu and
James Bradbury and
Caiming Xiong and
Victor O. K. Li and
Richard Socher},
title = {Non-Autoregressive Neural Machine Translation},
publisher = {International Conference on Learning Representations},
year = {2018}
}
@article{Zhou2020UnderstandingKD,
title={Understanding Knowledge Distillation in Non-autoregressive Machine Translation},
author={Chunting Zhou and Graham Neubig and Jiatao Gu},
journal={ArXiv},
year={2020},
volume={abs/1911.02727}
}
@inproceedings{Wang2019NonAutoregressiveMT,
title={Non-Autoregressive Machine Translation with Auxiliary Regularization},
author={Yiren Wang and Fei Tian and D. He and T. Qin and ChengXiang Zhai and T. Liu},
booktitle={AAAI},
year={2019}
}
@inproceedings{Kaiser2018FastDI,
title={Fast Decoding in Sequence Models using Discrete Latent Variables},
author={Łukasz Kaiser and Aurko Roy and Ashish Vaswani and Niki Parmar and S. Bengio and Jakob Uszkoreit and Noam Shazeer},
booktitle={ICML},
year={2018}
}
@article{Tu2020ENGINEEI,
title={ENGINE: Energy-Based Inference Networks for Non-Autoregressive Machine Translation},
author={Lifu Tu and Richard Yuanzhe Pang and Sam Wiseman and Kevin Gimpel},
journal={ArXiv},
year={2020},
volume={abs/2005.00850}
}
@inproceedings{Shu2020LatentVariableNN,
title={Latent-Variable Non-Autoregressive Neural Machine Translation with Deterministic Inference using a Delta Posterior},
author={Raphael Shu and Jason Lee and Hideki Nakayama and Kyunghyun Cho},
booktitle={AAAI},
year={2020}
}
@inproceedings{Li2019HintBasedTF,
title={Hint-Based Training for Non-Autoregressive Machine Translation},
author={Zhuohan Li and Zi Lin and Di He and Fei Tian and Tao Qin and Liwei Wang and T. Liu},
booktitle={EMNLP/IJCNLP},
year={2019}
}
@inproceedings{Ho2016ModelFreeIL,
title={Model-Free Imitation Learning with Policy Optimization},
author={Jonathan Ho and J. Gupta and S. Ermon},
booktitle={ICML},
year={2016}
}
@inproceedings{Ho2016GenerativeAI,
title={Generative Adversarial Imitation Learning},
author={Jonathan Ho and S. Ermon},
booktitle={NIPS},
year={2016}
}
@article{Duan2017OneShotIL,
title={One-Shot Imitation Learning},
author={Yan Duan and Marcin Andrychowicz and Bradly C. Stadie and Jonathan Ho and J. Schneider and Ilya Sutskever and P. Abbeel and W. Zaremba},
journal={ArXiv},
year={2017},
volume={abs/1703.07326}
}
@inproceedings{Wang2018SemiAutoregressiveNM,
title={Semi-Autoregressive Neural Machine Translation},
author={C. Wang and Ji Zhang and Haiqing Chen},
booktitle={EMNLP},
year={2018}
}
@inproceedings{Ghazvininejad2019MaskPredictPD,
title={Mask-Predict: Parallel Decoding of Conditional Masked Language Models},
author={Marjan Ghazvininejad and Omer Levy and Yinhan Liu and Luke Zettlemoyer},
booktitle={EMNLP/IJCNLP},
year={2019}
}
@article{Kasai2020NonAutoregressiveMT,
title={Non-Autoregressive Machine Translation with Disentangled Context Transformer},
author={Jungo Kasai and J. Cross and Marjan Ghazvininejad and Jiatao Gu},
journal={arXiv: Computation and Language},
year={2020}
}
@article{Zhou2019SynchronousBN,
title={Synchronous Bidirectional Neural Machine Translation},
author={L. Zhou and Jiajun Zhang and C. Zong},
journal={Transactions of the Association for Computational Linguistics},
year={2019},
volume={7},
pages={91-105}
}
@article{devlin2019bert,
title={Bert: Pre-training of deep bidirectional transformers for language understanding},
author={Devlin Jacob and Chang Ming-Wei and Lee Kenton and Toutanova Kristina},
year={2019},
pages = {4171--4186},
journal = {Annual Meeting of the Association for Computational Linguistics},
}
@inproceedings{Feng2016ImprovingAM,
title={Improving Attention Modeling with Implicit Distortion and Fertility for Machine Translation},
author={Shi Feng and Shujie Liu and Nan Yang and Mu Li and M. Zhou and K. Q. Zhu},
booktitle={COLING},
year={2016}
}
@inproceedings{TuModeling,
author = {Zhaopeng Tu and
Zhengdong Lu and
Yang Liu and
Xiaohua Liu and
Hang Li},
title = {Modeling Coverage for Neural Machine Translation},
publisher = {Annual Meeting of the Association for Computational Linguistics},
year = {2016}
}
@article{Wu2016GooglesNM,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author = {Yonghui Wu and
Mike Schuster and
Zhifeng Chen and
Quoc V. Le and
Mohammad Norouzi and
Wolfgang Macherey and
Maxim Krikun and
Yuan Cao and
Qin Gao and
Klaus Macherey and
Jeff Klingner and
Apurva Shah and
Melvin Johnson and
Xiaobing Liu and
Lukasz Kaiser and
Stephan Gouws and
Yoshikiyo Kato and
Taku Kudo and
Hideto Kazawa and
Keith Stevens and
George Kurian and
Nishant Patil and
Wei Wang and
Cliff Young and
Jason Smith and
Jason Riesa and
Alex Rudnick and
Oriol Vinyals and
Greg Corrado and
Macduff Hughes and
Jeffrey Dean},
journal = {CoRR},
year={2016},
volume={abs/1609.08144}
}
@inproceedings{li-etal-2018-simple,
author = {Yanyang Li and
Tong Xiao and
Yinqiao Li and
Qiang Wang and
Changming Xu and
Jingbo Zhu},
title = {A Simple and Effective Approach to Coverage-Aware Neural Machine Translation},
publisher = {Annual Meeting of the Association for Computational Linguistics},
pages = {292--297},
year = {2018}
}
@article{Peris2017InteractiveNM,
title={Interactive neural machine translation},
author={{\'A}lvaro Peris and Miguel Domingo and F. Casacuberta},
journal={Comput. Speech Lang.},
year={2017},
volume={45},
pages={201-220}
}
@inproceedings{Peris2018ActiveLF,
title={Active Learning for Interactive Neural Machine Translation of Data Streams},
author={{\'A}lvaro Peris and F. Casacuberta},
booktitle={CoNLL},
year={2018}
}
@article{Xiao2016ALA,
title={A Loss-Augmented Approach to Training Syntactic Machine Translation Systems},
author={Tong Xiao and Derek F. Wong and Jingbo Zhu},
journal={IEEE/ACM Transactions on Audio, Speech, and Language Processing},
year={2016},
volume={24},
pages={2069-2083}
}
@inproceedings{DBLP:conf/acl/JeanCMB15,
author = {S{\'{e}}bastien Jean and
KyungHyun Cho and
Roland Memisevic and
Yoshua Bengio},
title = {On Using Very Large Target Vocabulary for Neural Machine Translation},
pages = {1--10},
publisher = {Annual Meeting of the Association for Computational Linguistics},
year = {2015}
}
@article{61115,
author={J. {Lin}},
journal={IEEE Transactions on Information Theory},
title={Divergence measures based on the Shannon entropy},
year={1991},
volume={37},
number={1},
pages={145-151}
}
@inproceedings{DBLP:conf/aaai/DabreF19,
author = {Raj Dabre and
Atsushi Fujita},
title = {Recurrent Stacking of Layers for Compact Neural Machine Translation
Models},
booktitle = {The Thirty-Third {AAAI} Conference on Artificial Intelligence, {AAAI}
2019, The Thirty-First Innovative Applications of Artificial Intelligence
Conference, {IAAI} 2019, The Ninth {AAAI} Symposium on Educational
Advances in Artificial Intelligence, {EAAI} 2019, Honolulu, Hawaii,
USA, January 27 - February 1, 2019},
pages = {6292--6299},
publisher = {{AAAI} Press},
year = {2019}
}
@inproceedings{DBLP:journals/corr/abs-1805-00631,
author = {Biao Zhang and
Deyi Xiong and
Jinsong Su},
title = {Accelerating Neural Transformer via an Average Attention Network},
publisher = {Annual Meeting of the Association for Computational Linguistics},
pages = {1789--1798},
year = {2018},
}
@inproceedings{Wu2019PayLA,
author = {Felix Wu and
Angela Fan and
Alexei Baevski and
Yann N. Dauphin and
Michael Auli},
title = {Pay Less Attention with Lightweight and Dynamic Convolutions},
publisher = {International Conference on Learning Representations},
year = {2019},
}
@inproceedings{Xiao2019SharingAW,
author = {Tong Xiao and
Yinqiao Li and
Jingbo Zhu and
Zhengtao Yu and
Tongran Liu},
title = {Sharing Attention Weights for Fast Transformer},
publisher = {International Joint Conference on Artificial Intelligence},
pages = {5292--5298},
year = {2019}
}
@inproceedings{Chen2018TheBO,
author = {Mia Xu Chen and
Orhan Firat and
Ankur Bapna and
Melvin Johnson and
Wolfgang Macherey and
George F. Foster and
Llion Jones and
Mike Schuster and
Noam Shazeer and
Niki Parmar and
Ashish Vaswani and
Jakob Uszkoreit and
Lukasz Kaiser and
Zhifeng Chen and
Yonghui Wu and
Macduff Hughes},
title = {The Best of Both Worlds: Combining Recent Advances in Neural Machine
Translation},
pages = {76--86},
publisher = {Annual Meeting of the Association for Computational Linguistics},
year = {2018}
}
@article{DBLP:journals/corr/abs-1906-00532,
author = {Aishwarya Bhandare and
Vamsi Sripathi and
Deepthi Karkada and
Vivek Menon and
Sun Choi and
Kushal Datta and
Vikram Saletore},
title = {Efficient 8-Bit Quantization of Transformer Neural Machine Language
Translation Model},
journal = {CoRR},
volume = {abs/1906.00532},
year = {2019}
}
@inproceedings{DBLP:conf/cvpr/JacobKCZTHAK18,
author = {Benoit Jacob and
Skirmantas Kligys and
Bo Chen and
Menglong Zhu and
Matthew Tang and
Andrew G. Howard and
Hartwig Adam and
Dmitry Kalenichenko},
title = {Quantization and Training of Neural Networks for Efficient Integer-Arithmetic-Only
Inference},
booktitle = {2018 {IEEE} Conference on Computer Vision and Pattern Recognition,
{CVPR} 2018, Salt Lake City, UT, USA, June 18-22, 2018},
pages = {2704--2713},
publisher = {{IEEE} Computer Society},
year = {2018}
}
@article{DBLP:journals/corr/abs-1910-10485,
author = {Gabriele Prato and
Ella Charlaix and
Mehdi Rezagholizadeh},
title = {Fully Quantized Transformer for Improved Translation},
journal = {CoRR},
volume = {abs/1910.10485},
year = {2019}
}
@inproceedings{DBLP:conf/nips/HubaraCSEB16,
author = {Itay Hubara and
Matthieu Courbariaux and
Daniel Soudry and
Ran El-Yaniv and
Yoshua Bengio},
title = {Binarized Neural Networks},
booktitle = {Advances in Neural Information Processing Systems 29: Annual Conference
on Neural Information Processing Systems 2016, December 5-10, 2016,
Barcelona, Spain},
pages = {4107--4115},
year = {2016}
}
@article{DBLP:journals/jcss/FreundS97,
author = {Yoav Freund and
Robert E. Schapire},
title = {A Decision-Theoretic Generalization of On-Line Learning and an Application
to Boosting},
journal = {Journal of Computer and System Sciences},
volume = {55},
number = {1},
pages = {119--139},
year = {1997}
}
@inproceedings{DBLP:conf/acl/XiaoZZW10,
author = {Tong Xiao and
Jingbo Zhu and
Muhua Zhu and
Huizhen Wang},
title = {Boosting-Based System Combination for Machine Translation},
booktitle = {{ACL} 2010, Proceedings of the 48th Annual Meeting of the Association
for Computational Linguistics, July 11-16, 2010, Uppsala, Sweden},
pages = {739--748},
publisher = {The Association for Computer Linguistics},
year = {2010}
}
@inproceedings{DBLP:conf/icassp/SimBGSW07,
author = {Khe Chai Sim and
William J. Byrne and
Mark J. F. Gales and
Hichem Sahbi and
Philip C. Woodland},
title = {Consensus Network Decoding for Statistical Machine Translation System
Combination},
booktitle = {Proceedings of the {IEEE} International Conference on Acoustics, Speech,
and Signal Processing, {ICASSP} 2007, Honolulu, Hawaii, USA, April
15-20, 2007},
pages = {105--108},
publisher = {{IEEE}},
year = {2007}
}
@inproceedings{DBLP:conf/acl/RostiMS07,
author = {Antti-Veikko I. Rosti and
Spyridon Matsoukas and
Richard M. Schwartz},
title = {Improved Word-Level System Combination for Machine Translation},
booktitle = {{ACL} 2007, Proceedings of the 45th Annual Meeting of the Association
for Computational Linguistics, June 23-30, 2007, Prague, Czech Republic},
publisher = {The Association for Computational Linguistics},
year = {2007}
}
@inproceedings{DBLP:conf/wmt/RostiZMS08,
author = {Antti-Veikko I. Rosti and
Bing Zhang and
Spyros Matsoukas and
Richard M. Schwartz},
title = {Incremental Hypothesis Alignment for Building Confusion Networks with
Application to Machine Translation System Combination},
booktitle = {Proceedings of the Third Workshop on Statistical Machine Translation,
WMT@ACL 2008, Columbus, Ohio, USA, June 19, 2008},
pages = {183--186},
publisher = {Association for Computational Linguistics},
year = {2008}
}
@inproceedings{DBLP:conf/emnlp/DuanLXZ09,
author = {Nan Duan and
Mu Li and
Tong Xiao and
Ming Zhou},
title = {The Feature Subspace Method for SMT System Combination},
booktitle = {Proceedings of the 2009 Conference on Empirical Methods in Natural
Language Processing, {EMNLP} 2009, 6-7 August 2009, Singapore, {A}
meeting of SIGDAT, a Special Interest Group of the {ACL}},
pages = {1096--1104},
publisher = {{ACL}},
year = {2009}
}
@article{DBLP:journals/corr/LiMJ16,
author = {Jiwei Li and
Will Monroe and
Dan Jurafsky},
title = {A Simple, Fast Diverse Decoding Algorithm for Neural Generation},
journal = {CoRR},
volume = {abs/1611.08562},
year = {2016}
}
@article{xiao2013bagging,
title ={Bagging and boosting statistical machine translation systems},
author ={Tong Xiao and Jingbo Zhu and Tongran Liu },
publisher ={Artificial Intelligence},
volume ={195},
pages ={496--527},
year ={2013}
}
@inproceedings{DBLP:conf/emnlp/TrombleKOM08,
author = {Roy Tromble and
Shankar Kumar and
Franz Josef Och and
Wolfgang Macherey},
title = {Lattice Minimum Bayes-Risk Decoding for Statistical Machine Translation},
booktitle = {2008 Conference on Empirical Methods in Natural Language Processing,
{EMNLP} 2008, Proceedings of the Conference, 25-27 October 2008, Honolulu,
Hawaii, USA, {A} meeting of SIGDAT, a Special Interest Group of the
{ACL}},
pages = {620--629},
publisher = {{ACL}},
year = {2008}
}
@inproceedings{DBLP:conf/aaai/SuTXJSL17,
author = {Jinsong Su and
Zhixing Tan and
Deyi Xiong and
Rongrong Ji and
Xiaodong Shi and
Yang Liu},
title = {Lattice-Based Recurrent Neural Network Encoders for Neural Machine
Translation},
booktitle = {Proceedings of the Thirty-First {AAAI} Conference on Artificial Intelligence,
February 4-9, 2017, San Francisco, California, {USA}},
pages = {3302--3308},
publisher = {{AAAI} Press},
year = {2017}
}
@inproceedings{Shaw2018SelfAttentionWR,
author = {Peter Shaw and
Jakob Uszkoreit and
Ashish Vaswani},
title = {Self-Attention with Relative Position Representations},
publisher = {Proceedings of the Human Language Technology Conference of
the North American Chapter of the Association for Computational Linguistics},
pages = {464--468},
year = {2018},
}
@inproceedings{WangLearning,
author = {Qiang Wang and
Bei Li and
Tong Xiao and
Jingbo Zhu and
Changliang Li and
Derek F. Wong and
Lidia S. Chao},
title = {Learning Deep Transformer Models for Machine Translation},
pages = {1810--1822},
publisher = {Annual Meeting of the Association for Computational Linguistics},
year = {2019}
}
@inproceedings{DBLP:conf/iclr/FanGJ20,
author = {Angela Fan and
Edouard Grave and
Armand Joulin},
title = {Reducing Transformer Depth on Demand with Structured Dropout},
booktitle = {8th International Conference on Learning Representations, {ICLR} 2020,
Addis Ababa, Ethiopia, April 26-30, 2020},
publisher = {OpenReview.net},
year = {2020}
}
@inproceedings{DBLP:conf/emnlp/WangXZ20,
author = {Qiang Wang and
Tong Xiao and
Jingbo Zhu},
editor = {Trevor Cohn and
Yulan He and
Yang Liu},
title = {Training Flexible Depth Model by Multi-Task Learning for Neural Machine
Translation},
booktitle = {Proceedings of the 2020 Conference on Empirical Methods in Natural
Language Processing: Findings, {EMNLP} 2020, Online Event, 16-20 November
2020},
pages = {4307--4312},
publisher = {Association for Computational Linguistics},
year = {2020}
}
@article{DBLP:journals/corr/abs-2002-02925,
author = {Canwen Xu and
Wangchunshu Zhou and
Tao Ge and
Furu Wei and
Ming Zhou},
title = {BERT-of-Theseus: Compressing {BERT} by Progressive Module Replacing},
journal = {CoRR},
volume = {abs/2002.02925},
year = {2020}
}
@inproceedings{DBLP:conf/iclr/BaevskiA19,
author = {Alexei Baevski and
Michael Auli},
title = {Adaptive Input Representations for Neural Language Modeling},
booktitle = {7th International Conference on Learning Representations, {ICLR} 2019,
New Orleans, LA, USA, May 6-9, 2019},
publisher = {OpenReview.net},
year = {2019}
}
@article{DBLP:journals/corr/abs-2006-04768,
author = {Sinong Wang and
Belinda Z. Li and
Madian Khabsa and
Han Fang and
Hao Ma},
title = {Linformer: Self-Attention with Linear Complexity},
journal = {CoRR},
volume = {abs/2006.04768},
year = {2020}
}
@article{DBLP:journals/corr/abs-1911-12385,
author = {Sachin Mehta and
Rik Koncel-Kedziorski and
Mohammad Rastegari and
Hannaneh Hajishirzi},
title = {DeFINE: DEep Factorized INput Word Embeddings for Neural Sequence
Modeling},
journal = {CoRR},
volume = {abs/1911.12385},
year = {2019}
}
@article{DBLP:journals/corr/abs-1906-09777,
author = {Xindian Ma and
Peng Zhang and
Shuai Zhang and
Nan Duan and
Yuexian Hou and
Dawei Song and
Ming Zhou},
title = {A Tensorized Transformer for Language Modeling},
journal = {CoRR},
volume = {abs/1906.09777},
year = {2019}
}
@inproceedings{DBLP:conf/nips/YangLSL19,
author = {Zhilin Yang and
Thang Luong and
Ruslan Salakhutdinov and
Quoc V. Le},
title = {Mixtape: Breaking the Softmax Bottleneck Efficiently},
booktitle = {Advances in Neural Information Processing Systems 32: Annual Conference
on Neural Information Processing Systems 2019, NeurIPS 2019, 8-14
December 2019, Vancouver, BC, Canada},
pages = {15922--15930},
year = {2019}
}
@article{DBLP:journals/corr/abs-2006-10369,
author = {Jungo Kasai and
Nikolaos Pappas and
Hao Peng and
James Cross and
Noah A. Smith},
title = {Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff
in Machine Translation},
journal = {CoRR},
volume = {abs/2006.10369},
year = {2020}
}
@inproceedings{DBLP:conf/aclnmt/HuLLLLWXZ20,
author = {Chi Hu and
Bei Li and
Yinqiao Li and
Ye Lin and
Yanyang Li and
Chenglong Wang and
Tong Xiao and
Jingbo Zhu},
title = {The NiuTrans System for {WNGT} 2020 Efficiency Task},
booktitle = {Proceedings of the Fourth Workshop on Neural Generation and Translation,
NGT@ACL 2020, Online, July 5-10, 2020},
pages = {204--210},
publisher = {Association for Computational Linguistics},
year = {2020}
}
@article{DBLP:journals/corr/abs-2010-02416,
author = {Yi-Te Hsu and
Sarthak Garg and
Yi-Hsiu Liao and
Ilya Chatsviorkin},
title = {Efficient Inference For Neural Machine Translation},
journal = {CoRR},
volume = {abs/2010.02416},
year = {2020}
}
@inproceedings{Vaswani2018Tensor2TensorFN,
author = {Ashish Vaswani and
Samy Bengio and
Eugene Brevdo and
Fran{\c{c}}ois Chollet and
Aidan N. Gomez and
Stephan Gouws and
Llion Jones and
Lukasz Kaiser and
Nal Kalchbrenner and
Niki Parmar and
Ryan Sepassi and
Noam Shazeer and
Jakob Uszkoreit},
title = {Tensor2Tensor for Neural Machine Translation},
pages = {193--199},
publisher = {Association for Machine Translation in the Americas},
year = {2018}
}
@inproceedings{Sun2019BaiduNM,
title={Baidu Neural Machine Translation Systems for WMT19},
author={M. Sun and
B. Jiang and
H. Xiong and
Zhongjun He and
H. Wu and
Haifeng Wang},
booktitle={WMT},
year={2019}
}
@inproceedings{Wang2018TencentNM,
title={Tencent Neural Machine Translation Systems for WMT18},
author={Mingxuan Wang and
Li Gong and
Wenhuan Zhu and
J. Xie and
C. Bian},
booktitle={WMT},
year={2018}
}
@article{Bi2019MultiagentLF,
title={Multi-agent Learning for Neural Machine Translation},
author={Tianchi Bi and
H. Xiong and
Zhongjun He and
H. Wu and
Haifeng Wang},
journal={ArXiv},
year={2019},
volume={abs/1909.01101}
}
@inproceedings{DBLP:conf/aclnmt/KoehnK17,
author = {Philipp Koehn and
Rebecca Knowles},
title = {Six Challenges for Neural Machine Translation},
pages = {28--39},
publisher = {Annual Meeting of the Association for Computational Linguistics},
year = {2017}
}
@inproceedings{Held2013AppliedSI,
title={Applied Statistical Inference: Likelihood and Bayes},
author={L. Held and Daniel Sabans Bov},
year={2013}
}
@inproceedings{Silvey2018StatisticalI,
title={Statistical Inference},
author={S. D. Silvey},
booktitle={Encyclopedia of Social Network Analysis and Mining. 2nd Ed.},
year={2018}
}
@inproceedings{Zhang2016VariationalNM,
title={Variational Neural Machine Translation},
author={Biao Zhang and Deyi Xiong and Jinsong Su and H. Duan and Min Zhang},
booktitle={EMNLP},
year={2016}
}
%%%%% chapter 14------------------------------------------------------
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%