\begin{tikzpicture}
\begin{scope}
\node [anchor=north,rectangle, inner sep=0mm,minimum height=1.2em,minimum width=2em,rounded corners=5pt,thick] (n1) at (0, 0) {编码端};
\node [anchor=west,rectangle, inner sep=0mm,minimum height=1.2em,minimum width=0em,rounded corners=5pt,thick] (n2) at ([xshift=3.5em,yshift=-0.5em]n1.east) {$z_0$};
\node [anchor=west,rectangle,draw, inner sep=0mm,minimum height=1.2em,minimum width=3em,fill=orange!20,rounded corners=5pt,thick] (n3) at ([xshift=3.5em,yshift=0em]n2.east) {$z_1$};
\node [anchor=west,rectangle,draw, inner sep=0mm,minimum height=1.2em,minimum width=3em,fill=orange!20,rounded corners=5pt,thick] (n4) at ([xshift=3.5em,yshift=0em]n3.east) {$z_2$};
\node [anchor=west,rectangle, inner sep=0mm,minimum height=1.2em,minimum width=1em,rounded corners=5pt,thick] (n6) at ([xshift=1.5em,yshift=0em]n4.east) {$\ldots$};
\node [anchor=west,rectangle,draw, inner sep=0mm,minimum height=1.2em,minimum width=3em,fill=orange!20,rounded corners=5pt,thick] (n5) at ([xshift=3.5em,yshift=0em]n6.east) {$z_{l}$};
\node [anchor=west,rectangle,draw, inner sep=0mm,minimum height=1.2em,minimum width=3em,fill=orange!20,rounded corners=5pt,thick] (n7) at ([xshift=1.5em,yshift=0em]n5.east) {$z_{l+1}$};
\node [anchor=north,rectangle,draw, inner sep=0mm,minimum height=1.2em,minimum width=15em,fill=teal!17,rounded corners=5pt,thick] (n8) at ([xshift=0em,yshift=-3em]n4.south) {层正则化};
\node [anchor=north,rectangle,draw, inner sep=0mm,minimum height=1.2em,minimum width=15em,fill=purple!17,rounded corners=5pt,thick] (n9) at ([xshift=0em,yshift=-1em]n8.south) {$L_0\ \quad L_1\ \quad L_2\quad \ldots \quad\ L_l$};
\node [anchor=north,rectangle,draw, inner sep=0mm,minimum height=1.2em,minimum width=15em,fill=teal!17,rounded corners=5pt,thick] (n10) at ([xshift=0em,yshift=-2em]n9.south) {权重累加};
\node [anchor=west,rectangle, inner sep=0mm,minimum height=1.2em, rounded corners=5pt,thick] (n11) at ([xshift=0em,yshift=-4.5em]n1.west) {聚合网络};
\node [anchor=east,rectangle, inner sep=0mm,minimum height=1.2em,minimum width=9em,rounded corners=5pt,thick] (n12) at ([xshift=0em,yshift=-4.5em]n7.east) {};
\node [anchor=south,rectangle, inner sep=0mm,minimum height=1em,minimum width=1em,rounded corners=5pt,thick] (n13) at ([xshift=0em,yshift=1em]n8.north) {};
\begin{pgfonlayer}{background}
{
\node[rectangle,inner sep=2pt,fill=blue!7] [fit = (n1) (n7) (n13)] (bg1) {};
\node[rectangle,inner sep=2pt,fill=red!7] [fit = (n10) (n8) (n11) (n12)] (bg2) {};
}
\end{pgfonlayer}
\draw[->,thick] ([xshift=0.5em,yshift=-0em]n2.south)..controls +(south:2em) and +(north:2em)..([xshift=-0em,yshift=-0em]n8.north) ;
\draw[->,thick] ([xshift=-0em,yshift=-0em]n3.south)..controls +(south:2em) and +(north:2em)..([xshift=-0em,yshift=-0em]n8.north) ;
\draw[->,thick] ([xshift=-0em,yshift=-0em]n5.south)..controls +(south:2em) and +(north:2em)..([xshift=-0em,yshift=-0em]n8.north) ;
\draw [->,thick] ([xshift=0em,yshift=0em]n4.south) -- ([xshift=0em,yshift=0em]n8.north);
\draw [->,thick] ([xshift=0em,yshift=0em]n8.south) -- ([xshift=0em,yshift=0em]n9.north);
\draw[->,thick] ([xshift=-4.5em,yshift=-0em]n9.south)..controls +(south:0.8em) and +(north:0.8em)..([xshift=-0em,yshift=-0em]n10.north) ;
\draw[->,thick] ([xshift=-2em,yshift=-0em]n9.south)..controls +(south:0.8em) and +(north:0.8em)..([xshift=-0em,yshift=-0em]n10.north) ;
\draw[->,thick] ([xshift=0em,yshift=-0em]n9.south)..controls +(south:0.8em) and +(north:0.8em)..([xshift=-0em,yshift=-0em]n10.north) ;
\draw[->,thick] ([xshift=4.5em,yshift=-0em]n9.south)..controls +(south:0.8em) and +(north:0.8em)..([xshift=-0em,yshift=-0em]n10.north) ;
\draw[->,thick] ([xshift=0em,yshift=-0em]n10.east)..controls +(east:5em) and +(south:1.5em)..([xshift=-0em,yshift=-0em]n7.south) ;
\end{scope}
\end{tikzpicture}
%%
\begin{center}
\begin{tikzpicture}
\begin{scope}[scale=0.6]
\node [anchor=east,fill=red!50,draw,rounded corners=3pt] (s11) at (-0.5em, 0) {\footnotesize{sublayer1}};
\node [anchor=west,draw,circle,line width=1pt] (c11) at ([xshift=2em]s11.east) {};
\node [anchor=north,fill=red!10,draw,dotted,rounded corners=3pt] (s21) at ([yshift=-3em]s11.south) {\footnotesize{sublayer1}};
\node [anchor=west, draw,circle,dotted,line width=1pt] (c21) at ([xshift=2em]s21.east) {};
\node [anchor=west,fill=red!10,draw,dotted,rounded corners=3pt] (s22) at ([xshift=2em]c21.east) {\footnotesize{sublayer2}};
\node [anchor=west, draw,circle,dotted,line width=1pt] (c22) at ([xshift=2em]s22.east) {};
\node [anchor=north,fill=red!50,draw,rounded corners=3pt] (s31) at ([yshift=-3em]s21.south) {\footnotesize{sublayer1}};
\node [anchor=west,draw,circle,line width=1pt] (c31) at ([xshift=2em]s31.east) {};
\node [anchor=north,fill=red!10,draw,dotted,rounded corners=3pt] (s41) at ([yshift=-3em]s31.south) {\footnotesize{sublayer1}};
\node [anchor=east, draw,circle,line width=1pt] (c44) at ([xshift=-2em]s41.west) {};
\node [anchor=west, draw,circle,dotted,line width=1pt] (c41) at ([xshift=2em]s41.east) {};
\node [anchor=west,fill=red!10,draw,dotted,rounded corners=3pt] (s42) at ([xshift=2em]c41.east) {\footnotesize{sublayer2}};
\node [anchor=west, draw,circle,dotted,line width=1pt] (c42) at ([xshift=2em]s42.east) {};
\node [anchor=west,fill=red!50,draw,rounded corners=3pt] (s43) at ([xshift=2em]c42.east) {\footnotesize{sublayer3}};
\node [anchor=west, draw,circle,line width=1pt] (c43) at ([xshift=2em]s43.east) {};
\draw[-,rounded corners,line width=1pt] (c44.east) -- ([xshift=0.8em]c44.east) -- ([xshift=-1.2em,yshift=2.3em]s11.west) -- ([xshift=2.8em,yshift=2.3em]s11.east) -- (c11.north);
\draw[-,rounded corners,line width=1pt] (c44.east) -- ([xshift=0.8em]c44.east) -- ([xshift=-1.2em]s11.west) -- (s11.west);
\draw[-,rounded corners,line width=1pt] (s11.east) -- (c11.west);
\draw[-,rounded corners,line width=1pt] (c11.east) -- ([xshift=11.3em]c11.east) -- (c22.north);
\draw[-,rounded corners,line width=1pt,dotted] (c44.east) -- ([xshift=0.8em]c44.east) -- ([xshift=-1.2em,yshift=2.3em]s21.west) -- ([xshift=2.7em,yshift=2.3em]s21.east) -- (c21.north);
\draw[-,rounded corners,line width=1pt,dotted] (c44.east) -- ([xshift=0.8em]c44.east) -- ([xshift=-1.2em]s21.west) -- (s21.west);
\draw[-,rounded corners,line width=1pt,dotted] (s21.east) -- (c21.west);
\draw[-,rounded corners,line width=1pt,dotted] (c21.east) -- (s22.west);
\draw[-,rounded corners,line width=1pt,dotted] (s22.east) -- (c22.west);
\draw[-,rounded corners,line width=1pt] (c22.east) -- ([xshift=11.3em]c22.east) -- (c43.north);
\draw[-,rounded corners,line width=1pt] (c44.east) -- ([xshift=0.8em]c44.east) -- ([xshift=-1.2em,yshift=2.3em]s31.west) -- ([xshift=2.7em,yshift=2.3em]s31.east) -- (c31.north);
\draw[-,rounded corners,line width=1pt] (c44.east) -- ([xshift=0.8em]c44.east) -- ([xshift=-1.2em]s31.west) -- (s31.west);
\draw[-,rounded corners,line width=1pt] (s31.east) -- (c31.west);
\draw[-,rounded corners,line width=1pt] (c31.east) -- ([xshift=11.3em]c31.east) -- (c42.north);
\draw[-,rounded corners,line width=1pt,dotted] (c44.east) -- ([xshift=0.8em]c44.east) -- ([xshift=-1.2em,yshift=2.3em]s41.west) -- ([xshift=2.7em,yshift=2.3em]s41.east) -- (c41.north);
\draw[-,rounded corners,line width=1pt,dotted] (c44.east) -- (s41.west);
\draw[-,rounded corners,line width=1pt,dotted] (s41.east) -- (c41.west);
\draw[-,rounded corners,line width=1pt,dotted] (c41.east) -- (s42.west);
\draw[-,rounded corners,line width=1pt,dotted] (s42.east) -- (c42.west);
\draw[-,rounded corners,line width=1pt] (c42.east) -- (s43.west);
\draw[-,rounded corners,line width=1pt] (s43.east) -- (c43.west);
\draw[->,rounded corners,line width=1pt] (c43.east) -- ([xshift=2em]c43.east);
\end{scope}
\end{tikzpicture}
\end{center}
\begin{center}
\begin{tikzpicture}[scale=1.0]
\footnotesize{
\begin{axis}[
width=.50\textwidth,
height=.40\textwidth,
legend style={at={(0.60,0.08)}, anchor=south west},
xlabel={\scriptsize{更新次数(10k)}},
ylabel={\scriptsize{学习率 ($10^{-3}$)}},
ylabel style={yshift=-1em},xlabel style={yshift=0.0em},
yticklabel style={/pgf/number format/precision=2,/pgf/number format/fixed zerofill},
ymin=0,ymax=2.2, ytick={0.5, 1, 1.5, 2},
xmin=0,xmax=5,xtick={1,2,3,4},
legend style={xshift=-8pt,yshift=-4pt, legend plot pos=right,font=\scriptsize,cells={anchor=west}}
]
\addplot[red,line width=1.25pt] coordinates {(0,0) (1.6,2) (1.8,1.888) (2,1.787) (2.5,1.606) (3,1.462) (3.5,1.3549) (4,1.266) (4.5,1.193) (5,1.131)};
\addlegendentry{\scriptsize Base48}
%\addplot[red,line width=1.25pt] coordinates {(0,0) (8000,0.002) (10000,0.00179) (12000,0.00163) (12950,0.001572)};
\addplot[blue,line width=1.25pt] coordinates {(0,0) (0.8,2) (0.9906,1.7983)};
%\addplot[red,line width=1.25pt] coordinates {(0,0) (8000,0.002) (9906,0.0017983)};
\addplot[blue,dashed,line width=1.25pt] coordinates {(0.9906,1.7983) (0.9906,2)};
\addplot[blue,line width=1.25pt] coordinates {(0.9906,2) (1.1906,1.79) (1.3906,1.63) (1.4856,1.572)};
\addplot[blue,dashed,line width=1.25pt] coordinates {(1.4856,1.572) (1.4856,2)};
\addplot[blue,line width=1.25pt] coordinates {(1.4856,2) (1.6856,1.79) (1.8856,1.63) (1.9806,1.572)};
\addplot[blue,dashed,line width=1.25pt] coordinates {(1.9806,1.572) (1.9806,2)};
\addplot[blue,line width=1.25pt] coordinates {(1.9806,2) (2.1806,1.79) (2.3806,1.63) (2.4756,1.572)};
\addplot[blue,dashed,line width=1.25pt] coordinates {(2.4756,1.572) (2.4756,2)};
\addplot[blue,line width=1.25pt] coordinates {(2.4756,2) (2.6756,1.79) (2.8756,1.63) (2.9706,1.572)};
\addplot[blue,dashed,line width=1.25pt] coordinates {(2.9706,1.572) (2.9706,2)};
\addplot[blue,line width=1.25pt] coordinates {(2.9706,2) (3.1706,1.79) (3.3706,1.63) (3.4656,1.572) (3.6706,1.4602) (3.7136,1.44)};
\addplot[blue,dashed,line width=1.25pt] coordinates {(3.7136,1.44) (3.7136,2)};
\addplot[blue,line width=1.25pt] coordinates {(3.7136,2) (3.9136,1.79) (4.1136,1.63) (4.2086,1.572) (4.4136,1.4602) (4.4566,1.44) (4.7000,1.3574) (5.0000,1.2531)};
\addlegendentry{\scriptsize SDT48}
\end{axis}
}
\end{tikzpicture}
\end{center}
%%%------------------------------------------------------------------------------------------------------------
%%% 残差连接:后作方式(Post-Norm)与前作方式(Pre-Norm)
\begin{center}
\begin{tikzpicture}
\begin{scope}[minimum height = 20pt]
\node [anchor=east] (x1) at (-0.5em, 0) {$x_l$};
\node [anchor=west,draw,fill=red!20,inner xsep=5pt,rounded corners=2pt] (F1) at ([xshift=2em]x1.east){\small{$\mathcal{F}$}};
\node [anchor=west,circle,draw,minimum size=1em] (n1) at ([xshift=2em]F1.east) {};
\node [anchor=west,draw,fill=green!20,inner xsep=5pt,rounded corners=2pt] (ln1) at ([xshift=2em]n1.east){\small{\textrm{LN}}};
\node [anchor=west] (x2) at ([xshift=2em]ln1.east) {$x_{l+1}$};
\node [anchor=north] (x3) at ([yshift=-5em]x1.south) {$x_l$};
\node [anchor=west,draw,fill=green!20,inner xsep=5pt,rounded corners=2pt] (F2) at ([xshift=2em]x3.east){\small{\textrm{LN}}};
\node [anchor=west,draw,fill=red!20,inner xsep=5pt,rounded corners=2pt] (ln2) at ([xshift=2em]F2.east){\small{$\mathcal{F}$}};
\node [anchor=west,circle,draw,minimum size=1em] (n2) at ([xshift=2em]ln2.east){};
\node [anchor=west] (x4) at ([xshift=2em]n2.east) {$x_{l+1}$};
\draw[->, line width=1pt] ([xshift=-0.1em]x1.east)--(F1.west);
\draw[->, line width=1pt] ([xshift=-0.1em]F1.east)--(n1.west);
\draw[->, line width=1pt] (n1.east)--node[above]{$y_l$}(ln1.west);
\draw[->, line width=1pt] ([xshift=-0.1em]ln1.east)--(x2.west);
\draw[->, line width=1pt] ([xshift=-0.1em]x3.east)--(F2.west);
\draw[->, line width=1pt] ([xshift=-0.1em]F2.east)--(ln2.west);
\draw[->, line width=1pt] ([xshift=0.1em]ln2.east)--node[above]{$y_l$}(n2.west);
\draw[->, line width=1pt] (n2.east)--(x4.west);
\draw[->,rounded corners,line width=1pt] ([yshift=-0.2em]x1.north) -- ([yshift=1em]x1.north) -- ([yshift=1.4em]n1.north) -- (n1.north);
\draw[->,rounded corners,line width=1pt] ([yshift=-0.2em]x3.north) -- ([yshift=1em]x3.north) -- ([yshift=1.4em]n2.north) -- (n2.north);
\draw[-] (n1.west)--(n1.east);
\draw[-] (n1.north)--(n1.south);
\draw[-] (n2.west)--(n2.east);
\draw[-] (n2.north)--(n2.south);
\node [anchor=south] (k1) at ([yshift=-0.1em]x1.north){};
\node [anchor=south] (k2) at ([yshift=-0.1em]x3.north){};
\begin{pgfonlayer}{background}
\node [rectangle,inner sep=0.3em,fill=orange!10] [fit = (x1) (F1) (n1) (ln1) (x2) (k1)] (box0) {};
\node [rectangle,inner sep=0.3em,fill=blue!10] [fit = (x3) (F2) (n2) (ln2) (x4) (k2)] (box1) {};
\end{pgfonlayer}
\node [anchor=north] (c1) at (box0.south){\footnotesize {(a)后作方式的残差连接}};
\node [anchor=north] (c2) at (box1.south){\footnotesize {(b)前作方式的残差连接}};
\end{scope}
\end{tikzpicture}
\end{center}
%%%------------------------------------------------------------------------------------------------------------
\begin{center}
\begin{tikzpicture}
\begin{scope}
\node [anchor=east,fill=orange!20,draw,rounded corners=3pt,minimum height=1.6em,minimum width=1.6em] (s11) at (-0.5em, 0) {\footnotesize{$\times h$}};
\node [rectangle,anchor=west,fill=blue!20,draw,rounded corners=3pt,minimum height=1.6em,minimum width=1.6em] (s12) at ([xshift=1.2em]s11.east) {};
\node [anchor=north,fill=orange!20,draw,rounded corners=3pt,minimum height=1.6em,minimum width=1.6em] (s21) at ([yshift=-1.2em]s11.south) {\footnotesize{$\times h$}};
\node [anchor=west,fill=orange!20,draw=red,rounded corners=3pt,minimum height=1.6em,minimum width=1.6em,dashed] (s22) at ([xshift=1.2em]s21.east) {\footnotesize{$\times h$}};
\node [anchor=west,fill=blue!20,draw,rounded corners=3pt,minimum height=1.6em,minimum width=1.6em] (s23) at ([xshift=1.2em]s22.east) {};
\node [anchor=north,fill=orange!20,draw,rounded corners=3pt,minimum height=1.6em,minimum width=1.6em] (s31) at ([yshift=-1.2em]s21.south) {\footnotesize{$\times h$}};
\node [anchor=west,fill=orange!20,draw,rounded corners=3pt,minimum height=1.6em,minimum width=1.6em] (s32) at ([xshift=1.2em]s31.east) {\footnotesize{$\times h$}};
\node [anchor=west,fill=orange!20,draw=red,rounded corners=3pt,minimum height=1.6em,minimum width=1.6em,dashed] (s33) at ([xshift=1.2em]s32.east) {\footnotesize{$\times h$}};
\node [anchor=west,fill=blue!20,draw,rounded corners=3pt,minimum height=1.6em,minimum width=1.6em] (s34) at ([xshift=1.2em]s33.east) {};
\node [anchor=north,fill=orange!20,draw,rounded corners=3pt,minimum height=1.6em,minimum width=1.6em] (s41) at ([yshift=-1.2em]s31.south) {\footnotesize{$\times h$}};
\node [anchor=west,fill=orange!20,draw,rounded corners=3pt,minimum height=1.6em,minimum width=1.6em] (s42) at ([xshift=1.2em]s41.east) {\footnotesize{$\times h$}};
\node [anchor=west,fill=orange!20,draw,rounded corners=3pt,minimum height=1.6em,minimum width=1.6em] (s43) at ([xshift=1.2em]s42.east) {\footnotesize{$\times h$}};
\node [anchor=west,fill=orange!20,draw=red,rounded corners=3pt,minimum height=1.6em,minimum width=1.6em,dashed] (s44) at ([xshift=1.2em]s43.east) {\footnotesize{$\times h$}};
\node [anchor=west,fill=blue!20,draw,rounded corners=3pt,minimum height=1.6em,minimum width=1.6em] (s45) at ([xshift=1.2em]s44.east) {};
\node [anchor=east] (p1) at ([xshift=-2em]s11.west) {\footnotesize{step 1}};
\node [anchor=east] (p2) at ([xshift=-2em]s21.west) {\footnotesize{step 2}};
\node [anchor=east] (p3) at ([xshift=-2em]s31.west) {\footnotesize{step 3}};
\node [anchor=east] (p4) at ([xshift=-2em]s41.west) {\footnotesize{step 4}};
\node [anchor=south,fill=orange!20,draw,rounded corners=3pt,minimum height=1.4em,minimum width=1.4em] (b1) at ([xshift=-0.2em,yshift=1.6em]p1.north) {};
\node [anchor=west] (b2) at (b1.east) {\footnotesize{:编码器}};
\node [anchor=west,fill=blue!20,draw,rounded corners=3pt,minimum height=1.4em,minimum width=1.4em] (b3) at ([xshift=1em]b2.east) {};
\node [anchor=west] (b4) at (b3.east) {\footnotesize{:解码器}};
\node [anchor=west] (b5) at ([xshift=2em]b4.east) {\footnotesize{:拷贝}};
\draw[-latex,thick,red,dashed] ([xshift=0.5em]b4.east) -- (b5.west);
\draw [-latex, line width=0.8pt] ([xshift=-1.2em]s11.west) -- (s11.west);
\draw [-latex, line width=0.8pt] (s11.east) -- (s12.west);
\draw [-latex, line width=0.8pt] (s12.east) -- ([xshift=1.2em]s12.east);
\draw [-latex, line width=0.8pt] ([xshift=-1.2em]s21.west) -- (s21.west);
\draw [-latex, line width=0.8pt] (s21.east) -- (s22.west);
\draw [-latex, line width=0.8pt] (s22.east) -- (s23.west);
\draw [-latex, line width=0.8pt] (s23.east) -- ([xshift=1.2em]s23.east);
\draw [-latex, line width=0.8pt] ([xshift=-1.2em]s31.west) -- (s31.west);
\draw [-latex, line width=0.8pt] (s31.east) -- (s32.west);
\draw [-latex, line width=0.8pt] (s32.east) -- (s33.west);
\draw [-latex, line width=0.8pt] (s33.east) -- (s34.west);
\draw [-latex, line width=0.8pt] (s34.east) -- ([xshift=1.2em]s34.east);
\draw [-latex, line width=0.8pt] ([xshift=-1.2em]s41.west) -- (s41.west);
\draw [-latex, line width=0.8pt] (s41.east) -- (s42.west);
\draw [-latex, line width=0.8pt] (s42.east) -- (s43.west);
\draw [-latex, line width=0.8pt] (s43.east) -- (s44.west);
\draw [-latex, line width=0.8pt] (s44.east) -- (s45.west);
\draw [-latex, line width=0.8pt] (s45.east) -- ([xshift=1.2em]s45.east);
\draw[-latex,thick,red,dashed] (s11.south)..controls +(south:1em) and +(north:1.2em)..(s22.north);
\draw[-latex,thick,red,dashed] (s22.south)..controls +(south:1em) and +(north:1.2em)..(s33.north);
\draw[-latex,thick,red,dashed] (s33.south)..controls +(south:1em) and +(north:1.2em)..(s44.north);
\end{scope}
\end{tikzpicture}
\end{center}
%%%------------------------------------------------------------------------------------------------------------
\begin{center}
\begin{tikzpicture}
\begin{scope}
\node [anchor=east,fill=orange!20,draw,rounded corners=3pt,minimum height=1.4em,minimum width=1.4em] (s11) at (-0.5em, 0) {};
\node [rectangle,anchor=west,fill=orange!20,draw,rounded corners=3pt,minimum height=1.4em,minimum width=1.4em] (s12) at ([xshift=2em]s11.east) {};
\node [anchor=west,fill=orange!20,draw,rounded corners=3pt,minimum height=1.4em,minimum width=1.4em] (s13) at ([xshift=2em]s12.east) {};
\node [anchor=west,fill=orange!20,draw,rounded corners=3pt,minimum height=1.4em,minimum width=1.4em] (s14) at ([xshift=2em]s13.east) {};
\node [anchor=north,fill=orange!20,draw,rounded corners=3pt,minimum height=1.4em,minimum width=1.4em] (s21) at ([yshift=-2.5em]s11.south) {};
\node [anchor=west,fill=orange!20,draw,rounded corners=3pt,minimum height=1.4em,minimum width=1.4em] (s22) at ([xshift=2em]s21.east) {};
\node [anchor=west,fill=orange!20,draw,rounded corners=3pt,minimum height=1.4em,minimum width=1.4em] (s23) at ([xshift=2em]s22.east) {};
\node [anchor=west,fill=orange!20,draw,rounded corners=3pt,minimum height=1.4em,minimum width=1.4em] (s24) at ([xshift=2em]s23.east) {};
\node [anchor=north,fill=orange!20,draw,rounded corners=3pt,minimum height=1.4em,minimum width=1.4em] (s31) at ([yshift=-2.5em]s21.south) {};
\node [anchor=west,fill=orange!20,draw,rounded corners=3pt,minimum height=1.4em,minimum width=1.4em] (s32) at ([xshift=2em]s31.east) {};
\node [anchor=west,fill=orange!20,draw,rounded corners=3pt,minimum height=1.4em,minimum width=1.4em] (s33) at ([xshift=2em]s32.east) {};
\node [anchor=west,fill=orange!20,draw,rounded corners=3pt,minimum height=1.4em,minimum width=1.4em] (s34) at ([xshift=2em]s33.east) {};
\node [anchor=north,fill=orange!20,draw,rounded corners=3pt,minimum height=1.4em,minimum width=1.4em] (s41) at ([yshift=-2.5em]s31.south) {};
\node [anchor=west,fill=orange!20,draw,rounded corners=3pt,minimum height=1.4em,minimum width=1.4em] (s42) at ([xshift=2em]s41.east) {};
\node [anchor=west,fill=orange!20,draw,rounded corners=3pt,minimum height=1.4em,minimum width=1.4em] (s43) at ([xshift=2em]s42.east) {};
\node [anchor=west,fill=orange!20,draw,rounded corners=3pt,minimum height=1.4em,minimum width=1.4em] (s44) at ([xshift=2em]s43.east) {};
\node [anchor=east] (p1) at ([xshift=-3.5em]s11.west) {$p=\infty$};
\node [anchor=east] (p2) at ([xshift=-4em]s21.west) {$p=1$};
\node [anchor=east] (p3) at ([xshift=-4em]s31.west) {$p=2$};
\node [anchor=east] (p4) at ([xshift=-4em]s41.west) {$p=4$};
\node [anchor=north] (p5) at ([yshift=-1em]p3.south) {$\cdots$};
\node [anchor=south,fill=orange!20,draw,rounded corners=3pt,minimum height=1.4em,minimum width=1.4em] (b1) at ([xshift=-0.6em,yshift=1.2em]p1.north) {};
\node [anchor=west] (b2) at (b1.east) {\footnotesize{:Layer}};
\node [anchor=west,draw=red,rounded corners=3pt,minimum height=1.4em,minimum width=1.4em,dashed,line width=0.8pt] (b3) at ([xshift=1em]b2.east) {};
\node [anchor=west] (b4) at (b3.east) {\footnotesize{:Block}};
\draw [-latex, line width=0.8pt] ([xshift=-2em]s11.west) -- (s11.west);
\draw [-latex, line width=0.8pt] (s11.east) -- (s12.west);
\draw [-latex, line width=0.8pt] (s12.east) -- (s13.west);
\draw [-latex, line width=0.8pt] (s13.east) -- (s14.west);
\draw [-latex, line width=0.8pt] (s14.east) -- ([xshift=2em]s14.east);
\draw [-latex, line width=0.8pt] ([xshift=-2em]s21.west) -- (s21.west);
\draw [-latex, line width=0.8pt] (s21.east) -- (s22.west);
\draw [-latex, line width=0.8pt] (s22.east) -- (s23.west);
\draw [-latex, line width=0.8pt] (s23.east) -- (s24.west);
\draw [-latex, line width=0.8pt] (s24.east) -- ([xshift=2em]s24.east);
\draw [-latex, line width=0.8pt] ([xshift=-2em]s31.west) -- (s31.west);
\draw [-latex, line width=0.8pt] (s31.east) -- (s32.west);
\draw [-latex, line width=0.8pt] (s32.east) -- (s33.west);
\draw [-latex, line width=0.8pt] (s33.east) -- (s34.west);
\draw [-latex, line width=0.8pt] (s34.east) -- ([xshift=2em]s34.east);
\draw [-latex, line width=0.8pt] ([xshift=-2em]s41.west) -- (s41.west);
\draw [-latex, line width=0.8pt] (s41.east) -- (s42.west);
\draw [-latex, line width=0.8pt] (s42.east) -- (s43.west);
\draw [-latex, line width=0.8pt] (s43.east) -- (s44.west);
\draw [-latex, line width=0.8pt] (s44.east) -- ([xshift=2em]s44.east);
\node [draw=red,rounded corners=3pt,minimum height=1.7em,minimum width=1.7em,dashed,line width=0.8pt] (x21) at (s21) {};
\node [draw=red,rounded corners=3pt,minimum height=1.7em,minimum width=1.7em,dashed,line width=0.8pt] (x22) at (s22) {};
\node [draw=red,rounded corners=3pt,minimum height=1.7em,minimum width=1.7em,dashed,line width=0.8pt] (x23) at (s23) {};
\node [draw=red,rounded corners=3pt,minimum height=1.7em,minimum width=1.7em,dashed,line width=0.8pt] (x24) at (s24) {};
\node [draw=red,rounded corners=3pt,minimum height=1.7em,minimum width=5.2em,dashed,line width=0.8pt] (x31) at ([xshift=1.75em]s31) {};
\node [draw=red,rounded corners=3pt,minimum height=1.7em,minimum width=5.2em,dashed,line width=0.8pt] (x32) at ([xshift=1.75em]s33) {};
\node [draw=red,rounded corners=3pt,minimum height=1.7em,minimum width=12.2em,dashed,line width=0.8pt] (x41) at ([xshift=1.75em]s42) {};
{
\draw [-latex, line width=0.8pt] ([xshift=-1em]s21.west).. controls +(58:0.6) and +(122:0.6) .. ([xshift=1em]s21.east);
\draw [-latex, line width=0.8pt] ([xshift=-1em]s22.west).. controls +(58:0.6) and +(122:0.6) .. ([xshift=1em]s22.east);
\draw [-latex, line width=0.8pt] ([xshift=-1em]s23.west).. controls +(58:0.6) and +(122:0.6) .. ([xshift=1em]s23.east);
\draw [-latex, line width=0.8pt] ([xshift=-1em]s24.west).. controls +(58:0.6) and +(122:0.6) .. ([xshift=1em]s24.east);
}
{
\draw [-latex, line width=0.8pt] ([xshift=-1em]s21.west).. controls +(65:0.8) and +(115:0.8) .. ([xshift=1em]s22.east);
\draw [-latex, line width=0.8pt] ([xshift=-1em]s22.west).. controls +(65:0.8) and +(115:0.8) .. ([xshift=1em]s23.east);
\draw [-latex, line width=0.8pt] ([xshift=-1em]s23.west).. controls +(65:0.8) and +(115:0.8) .. ([xshift=1em]s24.east);
\draw [-latex, line width=0.8pt] ([xshift=-1em]s31.west).. controls +(65:0.8) and +(115:0.8) .. ([xshift=1em]s32.east);
\draw [-latex, line width=0.8pt] ([xshift=-1em]s33.west).. controls +(65:0.8) and +(115:0.8) .. ([xshift=1em]s34.east);
}
{
\draw [-latex, line width=0.8pt] ([xshift=-1em]s21.west).. controls +(70:1.0) and +(110:1.0) .. ([xshift=1em]s23.east);
\draw [-latex, line width=0.8pt] ([xshift=-1em]s22.west).. controls +(70:1.0) and +(110:1.0) .. ([xshift=1em]s24.east);
}
{
\draw [-latex, line width=0.8pt] ([xshift=-1em]s21.west).. controls +(75:1.2) and +(105:1.2) .. ([xshift=1em]s24.east);
\draw [-latex, line width=0.8pt] ([xshift=-1em]s31.west).. controls +(75:1.2) and +(105:1.2) .. ([xshift=1em]s34.east);
\draw [-latex, line width=0.8pt] ([xshift=-1em]s41.west).. controls +(75:1.2) and +(105:1.2) .. ([xshift=1em]s44.east);
}
\end{scope}
\end{tikzpicture}
\end{center}
%%%------------------------------------------------------------------------------------------------------------
%%% 标准Pre-Norm与基于随机子层跳跃的Pre-Norm
\begin{center}
\begin{tikzpicture}
\begin{scope}[minimum height = 20pt]
\node [anchor=east] (x1) at (-0.5em, 0) {$x_l$};
\node [anchor=west,draw,fill=red!20,inner xsep=5pt,rounded corners=2pt] (ln1) at ([xshift=1em]x1.east){\small{\textrm{LN}}};
\node [anchor=west,draw,fill=green!20,inner xsep=5pt,rounded corners=2pt] (f1) at ([xshift=0.6em]ln1.east){\small{$\mathcal{F}$}};
\node [anchor=west,circle,draw,minimum size=1em] (n1) at ([xshift=3em]f1.east){};
\node [anchor=west] (x2) at ([xshift=1em]n1.east) {$x_{l+1}$};
\node [anchor=west,draw,fill=red!20,inner xsep=5pt,rounded corners=2pt] (ln12) at ([xshift=1em]x2.east){\small{\textrm{LN}}};
\node [anchor=west,draw,fill=green!20,inner xsep=5pt,rounded corners=2pt] (f12) at ([xshift=0.6em]ln12.east){\small{$\mathcal{F}$}};
\node [anchor=west,circle,draw,minimum size=1em] (n12) at ([xshift=3em]f12.east){};
\node [anchor=west] (x22) at ([xshift=1em]n12.east) {$x_{l+2}$};
\node [anchor=north] (x3) at ([yshift=-5em]x1.south) {$x_l$};
\node [anchor=west,draw,fill=red!20,inner xsep=5pt,rounded corners=2pt] (ln2) at ([xshift=1em]x3.east){\small{\textrm{LN}}};
\node [anchor=west,draw,fill=green!20,inner xsep=5pt,rounded corners=2pt] (f2) at ([xshift=0.6em]ln2.east){\small{$\mathcal{F}$}};
\node [anchor=west,minimum size=1em] (p1) at ([xshift=1em]f2.east){};
\node [anchor=north] (m1) at ([yshift=0.6em]p1.south){\tiny{\red{$M=1$}}};
\node [anchor=west,circle,draw,minimum size=1em] (n2) at ([xshift=3em]f2.east){};
\node [anchor=west] (x4) at ([xshift=1em]n2.east) {$x_{l+1}$};
\node [anchor=west,draw,fill=red!20,inner xsep=5pt,rounded corners=2pt] (ln22) at ([xshift=1em]x4.east){\small{\textrm{LN}}};
\node [anchor=west,draw,fill=green!20,inner xsep=5pt,rounded corners=2pt] (f22) at ([xshift=0.6em]ln22.east){\small{$\mathcal{F}$}};
\node [anchor=west,minimum size=1em] (p2) at ([xshift=1em]f22.east){};
\node [anchor=north] (m2) at ([yshift=0.6em]p2.south){\tiny{\red{$M=0$}}};
\node [anchor=west,circle,draw,minimum size=1em] (n22) at ([xshift=3em]f22.east){};
\node [anchor=west] (x42) at ([xshift=1em]n22.east) {$x_{l+2}$};
\draw[->, line width=1pt] ([xshift=-0.1em]x1.east)--(ln1.west);
\draw[->, line width=1pt] ([xshift=-0.1em]ln1.east)--(f1.west);
\draw[->, line width=1pt] ([xshift=0.1em]f1.east)--(n1.west);
\draw[->, line width=1pt] (n1.east)--(x2.west);
\draw[->, line width=1pt] ([xshift=-0.1em]x3.east)--(ln2.west);
\draw[->, line width=1pt] ([xshift=-0.1em]ln2.east)--(f2.west);
\draw[-, line width=1pt] ([xshift=0.1em]f2.east)--(p1.west);
\draw[*-,red,line width=0.6pt] (p1.west) -- (p1.east);
\draw[->, line width=1pt] (p1.east)--(n2.west);
\draw[->, line width=1pt] (n2.east)--(x4.west);
\draw[->,rounded corners,line width=1pt] ([yshift=-0.2em]x1.north) -- ([yshift=1em]x1.north) -- ([yshift=1.4em]n1.north) -- (n1.north);
\draw[->,rounded corners,line width=1pt] ([yshift=-0.2em]x3.north) -- ([yshift=1em]x3.north) -- ([yshift=1.4em]n2.north) -- (n2.north);
\draw[-] (n1.west)--(n1.east);
\draw[-] (n1.north)--(n1.south);
\draw[-] (n2.west)--(n2.east);
\draw[-] (n2.north)--(n2.south);
\draw[->, line width=1pt] ([xshift=-0.1em]x2.east)--(ln12.west);
\draw[->, line width=1pt] ([xshift=-0.1em]ln12.east)--(f12.west);
\draw[->, line width=1pt] ([xshift=0.1em]f12.east)--(n12.west);
\draw[->, line width=1pt] (n12.east)--(x22.west);
\draw[->, line width=1pt] ([xshift=-0.1em]x4.east)--(ln22.west);
\draw[->, line width=1pt] ([xshift=-0.1em]ln22.east)--(f22.west);
\draw[-, line width=1pt] ([xshift=0.1em]f22.east)--(p2.west);
\draw[*-,red,line width=0.6pt] ([yshift=-0.1em]p2.west) -- (p2.north east);
\draw[->, line width=1pt] (p2.east)--(n22.west);
\draw[->, line width=1pt] (n22.east)--(x42.west);
\draw[->,rounded corners,line width=1pt] ([yshift=-0.2em]x2.north) -- ([yshift=1em]x2.north) -- ([yshift=1.4em]n12.north) -- (n12.north);
\draw[->,rounded corners,line width=1pt] ([yshift=-0.2em]x4.north) -- ([yshift=1em]x4.north) -- ([yshift=1.4em]n22.north) -- (n22.north);
\draw[-] (n12.west)--(n12.east);
\draw[-] (n12.north)--(n12.south);
\draw[-] (n22.west)--(n22.east);
\draw[-] (n22.north)--(n22.south);
\node [anchor=south] (k1) at ([yshift=-0.1em]x1.north){};
\node [anchor=south] (k2) at ([yshift=-0.1em]x3.north){};
\begin{pgfonlayer}{background}
\node [rectangle,inner sep=0.3em,fill=orange!10] [fit = (x1) (f1) (n1) (ln1) (x2) (k1) (f12) (n12) (ln12) (x22)] (box0) {};
\node [rectangle,inner sep=0.3em,fill=blue!10] [fit = (x3) (f2) (n2) (ln2) (x4) (k2) (f22) (n22) (ln22) (x42)] (box1) {};
\end{pgfonlayer}
\node [anchor=north] (c1) at (box0.south){\footnotesize {(a)标准的Pre-Norm}};
\node [anchor=north] (c2) at (box1.south){\footnotesize {(b)基于随机子层跳跃的Pre-Norm}};
\end{scope}
\end{tikzpicture}
\end{center}
\definecolor{ublue}{rgb}{0.152,0.250,0.545}
\begin{tikzpicture}
\begin{axis}
[
width=5cm, height=3.5cm,
xtick={15,17,19,21,23,25},
ytick={6.0,6.5,7.0},
xlabel={\scriptsize Epoch},
ylabel={},
ylabel style={},
x tick label style={},
y tick label style={},
tick align=inside,
legend style={anchor=north,xshift=1.7cm,yshift=1cm,legend columns =-1},
ymin=5.7,
ymax=7.3,
xmin=14.6,
xmax=25.4,
extra y ticks={6.0,6.5,7.0},
extra y tick labels={3.7,3.8,3.9},
extra y tick style={ticklabel pos=right}]
\addplot [sharp plot,very thick,red!60,mark=diamond*] coordinates{(15,6.75) (16,6.73) (17,6.70) (18,6.67) (19,6.64) (20,6.61) (21,6.59) (22,6.58) (23,6.57) (24,6.58) (25,6.59)};
\addplot [sharp plot,very thick,purple!60,mark=triangle*] coordinates{(15,6.70) (16,6.4) (17,6.20) (18,6.30) (19,6.20) (20,6.10) (21,6.15) (22,6.10) (23,6.15) (24,6.16) (25,6.17)};
\legend{\scriptsize {训练集},\scriptsize{校验集}}
\end{axis}
\begin{axis}
[ xshift=6.6cm,
width=5cm, height=3.5cm,
xtick={15,17,19,21,23,25},
ytick={5.0,5.5,6.0},
xlabel={\scriptsize Epoch},
ylabel={},
ylabel style={},
x tick label style={},
y tick label style={},
tick align=inside,
ymin=4.7,
ymax=6.3,
xmin=14.6,
xmax=25.4,
extra y ticks={5.0,5.5,6.0},
extra y tick labels={3.5,3.6,3.7},
extra y tick style={ticklabel pos=right}]
\addplot [sharp plot,very thick,red!60,mark=diamond*] coordinates{(15,5.7) (16,5.65) (17,5.6) (18,5.55) (19,5.5) (20,5.45) (21,5.4) (22,5.38) (23,5.36) (24,5.34) (25,5.27)};
\addplot [sharp plot,very thick,purple!60,mark=triangle*] coordinates{(15,5.0) (16,4.9) (17,4.9) (18,5.05) (19,4.9) (20,5.0) (21,5.0) (22,5.1) (23,5.0) (24,5.15) (25,5.5)};
\end{axis}
\node [anchor=north,rotate=90] (n1) at (-1.3cm,1cm) {\scriptsize 训练集\ PPL};
\node [anchor=north,rotate=90] (n2) at (5.4cm,1cm) {\scriptsize 训练集\ PPL};
\node [anchor=north,rotate=90] (n3) at (4.2cm,1cm) {\scriptsize 校验集\ PPL};
\node [anchor=north,rotate=90] (n4) at (10.7cm,1cm) {\scriptsize 校验集\ PPL};
\end{tikzpicture}
%---------------------------------------------------------------------
% NEW SECTION
%----------------------------------------------------------------------------------------
\section{深层网络}
\parinterval {\chapterthirteen}已经指出:增加神经网络的深度有助于对句子进行更充分的表示,同时增加模型的容量。但是,简单地堆叠很多层Transformer网络并不能带来性能上的提升,反而会面临更加严重的梯度消失/梯度爆炸问题。这是由于伴随神经网络变深,梯度无法有效地从输出层回传到底层网络,造成网络浅层部分的参数无法得到充分训练\upcite{Wang2019LearningDT,DBLP:conf/cvpr/YuYR18}。针对这些问题,已经有研究者尝试求解,并取得了很好的效果。比如,设计更有利于深层信息传递的网络连接和恰当的参数初始化方法等\upcite{Bapna2018TrainingDN,Wang2019LearningDT,DBLP:conf/emnlp/ZhangTS19}。
\parinterval 但是,如何设计一个足够“深”的机器翻译模型仍然是业界关注的热点问题之一。此外,伴随着网络的继续变深,将会面临一些新的问题,例如,如何加速深层网络的训练,如何解决深层网络的过拟合问题等。下面将会对以上问题展开讨论。
%----------------------------------------------------------------------------------------
% NEW SUB-SECTION
%----------------------------------------------------------------------------------------
\subsection{Post-Norm vs Pre-Norm}
\parinterval 为了探究为何深层的Transformer模型很难直接训练,首先对Transformer的模型结构进行简单的回顾。以Transformer的编码端为例,围绕多头自注意力网络和前馈神经网络这两种子层,Transformer模型利用残差连接和层正则化操作来提高信息的传递效率。根据二者的组合方式,Transformer模型大致分为图\ref{fig:15-1}中的两种结构\ \dash \ 后作方式的残差单元(Post-Norm)和前作方式的残差单元(Pre-Norm)。
%----------------------------------------------
\begin{figure}[htp]
\centering
\input{./Chapter15/Figures/figure-post-norm-vs-pre-norm}
\caption{Post-Norm Transformer vs Pre-Norm Transformer}
\label{fig:15-1}
\end{figure}
%-------------------------------------------
\parinterval 令$x_l$和$x_{l+1}$表示第$l$个子层的输入和输出\footnote[1]{这里沿用Transformer中的定义,每一层(Layer)包含多个子层(Sub-layer)。比如,对于Transformer编码器,每一层包含一个自注意力子层和一个前馈神经网络子层。所有子层都需要进行层正则化和残差连接。},$y_l$表示中间的临时输出;$\textrm{LN}(\cdot)$表示层正则化操作\upcite{Ba2016LayerN},它可以帮助减少子层输出分布的方差,从而让训练变得更稳定;$\mathcal{F}(\cdot)$表示子层所对应的函数,比如前馈神经网络、自注意力网络等。下面分别对Post-Norm和Pre-Norm进行简单的描述。
\begin{itemize}
\vspace{0.5em}
\item Post-Norm:早期的Transformer遵循的是Post-Norm结构\upcite{vaswani2017attention}。也就是层正则化作用于每一子层的输入和输出的残差结果上,如图\ref{fig:15-1}(a)所示。可以表示如下:
\begin{eqnarray}
x_{l+1}=\textrm{LN}(x_l+\mathcal{F}(x_l;\theta_l))
\label{eq:15-1}
\end{eqnarray}
其中,$\theta_l$是子层$l$的参数。
\vspace{0.5em}
\item Pre-Norm:通过调整层正则化的位置,将其放置于每一子层的输入之前,得到了Pre-Norm结构,如图\ref{fig:15-1}(b)所示。这一结构与He等人在残差网络上的设计思想一致\upcite{DBLP:conf/eccv/HeZRS16},也被广泛应用于最新的Transformer开源系统中\upcite{Vaswani2018Tensor2TensorFN,Ottfairseq,KleinOpenNMT},公式如下(两种结构的示意实现可参考下面给出的代码草图):
\begin{eqnarray}
x_{l+1}=x_l+\mathcal{F}(\textrm{LN}(x_l);\theta_l)
\label{eq:15-2}
\end{eqnarray}
\end{itemize}
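\parinterval 为了更直观地对比这两种结构,下面给出一个PyTorch风格的示意性代码草图。其中的类名和参数名(如SublayerConnection、pre\_norm)均是为了说明而假设的,并非某个开源系统中的真实接口;sublayer对应公式中的$\mathcal{F}(\cdot)$:
\begin{verbatim}
import torch
import torch.nn as nn

class SublayerConnection(nn.Module):
    # 残差连接+层正则化的示意实现,可在Post-Norm与Pre-Norm之间切换
    def __init__(self, d_model, pre_norm=True):
        super().__init__()
        self.norm = nn.LayerNorm(d_model)
        self.pre_norm = pre_norm

    def forward(self, x, sublayer):
        # sublayer对应F(.),例如自注意力子层或前馈神经网络子层
        if self.pre_norm:
            # Pre-Norm: x_{l+1} = x_l + F(LN(x_l))
            return x + sublayer(self.norm(x))
        # Post-Norm: x_{l+1} = LN(x_l + F(x_l))
        return self.norm(x + sublayer(x))
\end{verbatim}
例如,可以把一个前馈神经网络子层作为sublayer传入上述模块,从而在同一份代码中比较两种组织方式的差异。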
\parinterval 从上述公式可以看到,一方面,Pre-Norm结构可以通过残差路径将底层网络的输出直接暴露给上层网络;另一方面,从反向传播的角度看,使用Pre-Norm结构时顶层的梯度可以更容易地反馈到底层网络。这里以一个含有$L$个子层的结构为例。令$Loss$表示整个神经网络输出上的损失,$x_L$为顶层的输出。对于Post-Norm结构,根据链式法则,损失$Loss$相对于$x_l$的梯度可以表示为:
\begin{eqnarray}
\frac{\partial Loss}{\partial x_l}=\frac{\partial Loss}{\partial x_L} \times \prod_{k=l}^{L-1}\frac{\partial \textrm{LN}(y_k)}{\partial y_k} \times \prod_{k=l}^{L-1}(1+\frac{\partial \mathcal{F}(x_k;\theta_k)}{\partial x_k})
\label{eq:15-3}
\end{eqnarray}
其中$\prod_{k=l}^{L-1}\frac{\partial \textrm{LN}(y_k)}{\partial y_k}$表示在反向传播过程中经过层正则化得到的复合函数导数,$\prod_{k=l}^{L-1}(1+\frac{\partial \mathcal{F}(x_k;\theta_k)}{\partial x_k})$代表每个子层间残差连接的导数。
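\parinterval 这一结果可以由链式法则直接得到:对于Post-Norm结构,记$y_k=x_k+\mathcal{F}(x_k;\theta_k)$,则相邻子层之间的导数为
\begin{eqnarray}
\frac{\partial x_{k+1}}{\partial x_k}&=&\frac{\partial \textrm{LN}(y_k)}{\partial y_k} \times (1+\frac{\partial \mathcal{F}(x_k;\theta_k)}{\partial x_k}) \nonumber
\end{eqnarray}
将$k$从$l$到$L-1$的各项连乘,即可得到公式\eqref{eq:15-3}中的两个连乘项。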
\parinterval 类似地,也能得到Pre-Norm结构的梯度计算结果,如下式所示:
\begin{eqnarray}
\frac{\partial Loss}{\partial x_l}=\frac{\partial Loss}{\partial x_L} \times (1+\sum_{k=l}^{L-1}\frac{\partial \mathcal{F}(\textrm{LN}(x_k);\theta_k)}{\partial x_l})
\label{eq:15-4}
\end{eqnarray}
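\parinterval 公式\eqref{eq:15-4}可以通过对公式\eqref{eq:15-2}自下而上逐层展开来理解:
\begin{eqnarray}
x_L&=&x_l+\sum_{k=l}^{L-1}\mathcal{F}(\textrm{LN}(x_k);\theta_k) \nonumber
\end{eqnarray}
对$x_l$求导,再与$\frac{\partial Loss}{\partial x_L}$相乘,即可得到公式\eqref{eq:15-4}的形式。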
\parinterval 对比公式\eqref{eq:15-3}和公式\eqref{eq:15-4}可以明显发现,Pre-Norm结构直接把顶层的梯度$\frac{\partial Loss}{\partial x_L}$传递给下层,也就是$\frac{\partial Loss}{\partial x_l}$中直接含有$\frac{\partial Loss}{\partial x_L}$的部分。这个性质弱化了梯度计算对模型深度$L$的依赖;而如公式\eqref{eq:15-3}右侧所示,Post-Norm结构会导致一个与$L$相关的多项导数的积,伴随着$L$的增大更容易发生梯度消失和梯度爆炸问题。因此,Pre-Norm结构更适于堆叠多层神经网络的情况。比如,使用Pre-Norm结构可以很轻松地训练一个30层(60个子层)的Transformer编码器网络,并带来可观的BLEU提升,这个深度相当于标准Transformer编码器(6层)深度的5倍\upcite{Wang2019LearningDT}。相对地,用Post-Norm结构训练深层网络的时候,训练结果很不稳定,甚至有时候无法完成有效训练。这里把使用Pre-Norm结构的深层Transformer称为Transformer-Deep。
\parinterval 另一个有趣的发现是,使用深层网络后,训练模型收敛的时间大大缩短。相比于Transformer-Big等宽网络,Transformer-Deep并不需要太大的隐藏层大小就可以取得相当甚至更优的翻译品质。也就是说,Transformer-Deep是一个更“窄”更“深”的网络。这种结构的参数量比Transformer-Big少,系统运行效率更高。表\ref{tab:15-1}对比了不同模型的参数量和训练/推断时间。
%----------------------------------------------
\begin{table}[htp]
\centering
\caption{不同Transformer结构的训练/推断时间对比(WMT14英德任务)}
\begin{tabular}{l | r r r}
\rule{0pt}{15pt} 系统 & 参数量 & 训练时间 & 推断时间 \\
\hline
\rule{0pt}{15pt} Base & 63M & 4.6h & 19.4s \\
\rule{0pt}{15pt} Big & 210M & 36.1h & 29.3s \\
\rule{0pt}{15pt} DLCL-30 & 137M & 9.8h & 24.6s \\
\end{tabular}
\label{tab:15-1}
\end{table}
%----------------------------------------------
\parinterval 还有一个有趣的发现是,当编码器端使用深层网络之后,解码器端使用更浅的网络依然能够维持相当的翻译品质。这是由于解码器端的计算仍然会有对源语言端信息的加工和抽象,当编码器变深之后,解码器对源语言端的加工不那么重要了,因此可以减少解码网络的深度。这样做的一个直接好处是:可以通过减少解码器的深度加速翻译系统。对于一些延时敏感的场景,这种架构是极具潜力的。
%----------------------------------------------------------------------------------------
% NEW SUB-SECTION
%----------------------------------------------------------------------------------------
\subsection{层聚合}
\parinterval 尽管使用Pre-Norm结构可以很容易地训练深层Transformer模型,但从信息传递的角度看,Transformer模型中第$n$层的输入仅仅依赖于前一层的输出。虽然残差连接可以将信息跨层传递,但是对于很深的网络,整个模型的输入和输出之间仍需要很多次残差连接才能进行有效的传递。为了使上层的网络可以更加方便地访问下层网络的信息,一种方法是直接引入更多跨层的连接。最简单的一种方法是直接将所有层的输出都连接到最上层,达到聚合多层信息的目的\upcite{Bapna2018TrainingDN,Wang2018MultilayerRF}。另一种更加有效的方式是使用{\small\bfnew{动态线性层聚合方法}}\index{动态线性层聚合方法}(Dynamic Linear Combination of Layers,DLCL)\index{Dynamic Linear Combination of Layers,DLCL}:每一层的输入不再仅仅依赖前一层的输出,而是将前面所有层的中间结果(包括词嵌入)进行线性聚合,这在理论上等价于常微分方程中的高阶求解方法\upcite{Wang2019LearningDT}。以Pre-Norm结构为例,具体做法如下:
\begin{itemize}
\vspace{0.5em}
\item 对于第$l$层的输出$x_{l+1}$,对其进行层正则化,得到该层信息的表示:
\begin{eqnarray}
z_{l}=\textrm{LN}(x_{l+1})
\label{eq:15-5}
\end{eqnarray}
注意,$z_0$表示词嵌入层的输出,$z_l(l>0)$表示Transformer网络中第$l$层经过层正则化之后的输出。
\vspace{0.5em}
\item 定义一个维度为$(L+1)\times(L+1)$的权值矩阵$\vectorn{W}$,矩阵中每一行表示之前各层对当前层计算的贡献度,其中$L$是编码端(或解码端)的层数。令$\vectorn{W}_{l,i}$代表权值矩阵$\vectorn{W}$$l$行第$i$列的权重,则层聚合的输出为$z_i$的线性加权和:
\begin{eqnarray}
g_l=\sum_{i=0}^{l}z_i\times \vectorn{W}_{l,i}
\label{eq:15-6}
\end{eqnarray}
$g_l$会作为输入的一部分送入第$l+1$层,其网络结构如图\ref{fig:15-2}所示。
\end{itemize}
%---------------------------------------------
\begin{figure}[htp]
\centering
\input{./Chapter15/Figures/figure-dynamic-linear-aggregation-network-structure}
\caption{动态线性层聚合网络结构图}
\label{fig:15-2}
\end{figure}
%-------------------------------------------
\parinterval 可以看到,权值矩阵$\vectorn{W}$是一个下三角矩阵。开始时,对矩阵参数的每一行进行平均初始化,即初始化矩阵$\vectorn{W}_0$每一行中参与聚合的各个位置的值均为$1/M$,其中$M \in \{1,2,3,\cdots,L+1\}$为该行参与聚合的层数(即非零元素的个数)。伴随着神经网络的训练,网络通过反向传播算法来不断更新$\vectorn{W}$中每一行不同位置权重的大小。
\parinterval 动态线性层聚合的一个好处是,系统可以自动学习不同层对当前层的贡献度。在实验中也发现,离当前层更近的部分贡献度(权重)会更大,这也是符合直觉的。
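\parinterval 下面给出动态线性层聚合的一个PyTorch风格的示意性代码草图。其中的类名与变量名均是为了说明而假设的;输入zs假设已经是按照公式\eqref{eq:15-5}进行层正则化之后的各层表示(zs[0]为词嵌入层的输出):
\begin{verbatim}
import torch
import torch.nn as nn

class DynamicLinearCombination(nn.Module):
    # 动态线性层聚合(DLCL)的示意实现
    def __init__(self, num_layers):
        super().__init__()
        # 下三角权值矩阵W:第l行表示第0..l层输出对第l+1层输入的贡献度
        w = torch.zeros(num_layers + 1, num_layers + 1)
        for l in range(num_layers + 1):
            w[l, : l + 1] = 1.0 / (l + 1)   # 每一行平均初始化
        self.weight = nn.Parameter(w)        # 随训练通过反向传播更新

    def forward(self, zs, l):
        # g_l = sum_i z_i * W_{l,i},结果作为第l+1层输入的一部分
        return sum(self.weight[l, i] * zs[i] for i in range(l + 1))
\end{verbatim}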
%----------------------------------------------------------------------------------------
% NEW SUB-SECTION
%----------------------------------------------------------------------------------------
\subsection{深层模型的训练加速}
\parinterval 尽管训练这种窄而深的神经网络比训练宽网络有更快的收敛速度,但伴随着训练数据的增加,以及模型的进一步加深,神经网络的训练代价成为不可忽视的问题。例如,在几千万甚至上亿的双语平行语料上训练一个48层的Transformer模型需要几周的时间才能达到收敛\footnote[2]{训练时间的估算是在单台8卡Titan V GPU服务器上得到的。}。因此,在保证模型精度不变的前提下如何高效地完成深层网络的训练也是至关重要的。在实践中能够发现,深层网络中相邻层之间具有一定的相似性。因此,一个想法是:能否通过不断复用浅层网络的参数来初始化更深层的网络,渐进式地训练深层网络,避免从头训练整个网络,进而达到加速深层网络训练的目的。
%----------------------------------------------------------------------------------------
% NEW SUB-SECTION
%----------------------------------------------------------------------------------------
\subsection{渐进式训练}
\parinterval 所谓渐进式训练是指从浅层网络开始,在训练过程中逐渐增加训练的深度。一种比较简单的方法是将网络分为浅层部分和深层部分,之后分别进行训练,最终达到提高模型翻译性能的目的\upcite{DBLP:conf/acl/WuWXTGQLL19}
\parinterval 另一种方式是动态构建深层网络,并尽可能复用浅层网络的训练结果。假设开始的时候模型包含$h$层网络,先训练这个模型至收敛。之后,直接拷贝这$h$层网络(包括参数),并堆叠成一个$2h$层的模型,再继续训练。重复这个过程,每次都在当前模型之上再堆叠$h$层,进行$n$次之后就得到了$n\times h$层的模型。图\ref{fig:15-3}给出了在编码端使用渐进式训练的示意图。
%----------------------------------------------
\begin{figure}[htp]
\centering
\input{./Chapter15/Figures/figure-progressive-training}
\caption{渐进式深层网络训练过程}
\label{fig:15-3}
\end{figure}
%-------------------------------------------
\parinterval 渐进式训练的好处在于深层模型并不是从头开始训练。每一次堆叠,都相当于利用“浅”模型给“深”模型提供了一个很好的初始点,这样深层模型的训练会更加容易。
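\parinterval 下面用一小段示意代码来说明“拷贝+堆叠”的过程(PyTorch风格;函数名grow\_encoder以及“每次拷贝最上面的$h$层”的细节均为示意性的假设):
\begin{verbatim}
import copy
import torch.nn as nn

def grow_encoder(encoder_layers, h):
    # 渐进式训练的示意:把当前已训练收敛的编码器中最上面的h层
    # (连同参数)拷贝一份,堆叠在模型之上,作为更深模型的初始点
    copied = [copy.deepcopy(layer) for layer in encoder_layers[-h:]]
    return nn.ModuleList(list(encoder_layers) + copied)
\end{verbatim}
反复执行“训练至收敛$\to$拷贝堆叠”的过程,模型的深度每次增加$h$层,而“浅”模型始终在为“深”模型提供初始点。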
%----------------------------------------------------------------------------------------
% NEW SUB-SECTION
%----------------------------------------------------------------------------------------
\subsection{分组稠密连接}
\parinterval 很多研究者已经发现深层网络不同层之间的稠密连接能够很明显地提高信息传递的效率\upcite{Wang2019LearningDT,DBLP:conf/cvpr/HuangLMW17,Dou2018ExploitingDR,DBLP:conf/acl/WuWXTGQLL19}。与此同时,对之前层信息的不断复用有助于得到更好的表示,但随之而来的是网络计算代价过大的问题。由于动态线性层聚合方法(DLCL)在每一次聚合时都需要重新计算之前每一层表示对当前层网络输入的贡献度,因此伴随着编码端整体深度的不断增加,这部分的计算代价变得不可忽略。例如,一个基于动态层聚合的48层Transformer模型的训练时间比不使用动态层聚合慢近1.9倍。同时,缓存中间结果也增加了显存的使用量,尽管使用了FP16计算,每张12G显存的GPU上计算的词也不能超过2048个,这导致训练开销急剧增大。
\parinterval 缓解这个问题的一种方法是使用更稀疏的层间连接方式。其核心思想与动态线性层聚合是类似的,不同点在于可以通过调整层之间连接的稠密程度来降低训练代价。比如,可以将每$p$层分为一组,之后动态线性层聚合只在不同组之间进行。这样,通过调节$p$值的大小可以控制网络中连接的稠密程度,作为一种训练代价与翻译性能之间的权衡。显然,标准的Transformer模型\upcite{vaswani2017attention}和DLCL模型\upcite{Wang2019LearningDT}都可以看作是该方法的一种特例。如图\ref{fig:15-4}所示:当$p=1$时,每一个单独的块被看作一个独立的组,这等价于基于动态层聚合的DLCL模型;当$p=\infty$时,这等价于正常的Transformer模型。值得注意的是,如果配合渐进式训练,在分组稠密连接中可以设置$p=h$。
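\parinterval 分组稠密连接的思想可以用下面的极简示意代码来说明:给定分组大小$p$,第$l$层在进行层聚合时只使用之前已经完整结束的组的输出。这里假设以每组最后一层的输出作为该组的代表,这只是为了说明而做的简化,具体的连接方式应以相应模型的设置为准:
\begin{verbatim}
def visible_layers(l, p):
    # 分组稠密连接的示意:每p层为一组,层聚合只在组之间进行
    # 返回第l层(层编号从0开始)聚合时可见的各组代表层编号
    finished_groups = l // p         # 第l层之前已经完整结束的组数
    return [g * p + p - 1 for g in range(finished_groups)]
\end{verbatim}
当$p=1$时,上述函数返回之前的所有层,等价于DLCL;当$p$大于网络总层数时返回为空,等价于标准的Transformer模型。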
%----------------------------------------------
\begin{figure}[htp]
\centering
\input{./Chapter15/Figures/figure-sparse-connections-between-different-groups}
\caption{不同组之间的稀疏连接}
\label{fig:15-4}
\end{figure}
%-------------------------------------------
%----------------------------------------------------------------------------------------
% NEW SUB-SECTION
%----------------------------------------------------------------------------------------
\subsection{学习率重置策略}
\parinterval 尽管渐进式训练策略与分组稠密连接结构可以加速深层网络的训练,但使用传统的学习率衰减策略会导致堆叠深层模型时的学习率较小,因此模型无法快速地达到收敛状态,同时也影响最终的模型性能。
\parinterval\ref{fig:15-5}中的红色曲线描绘了标准的Transformer模型的学习率曲线(WMT英德任务),可以看到当模型训练到40k步时,网络的学习率值对比峰值有明显的差距,而此时刚开始训练最终的深层模型,过小的学习率并不利于后期深层网络的充分训练。
%----------------------------------------------
\begin{figure}[htp]
\centering
\input{./Chapter15/Figures/figure-learning-rate}
\caption{学习率重置vs从头训练的学习率曲线}
\label{fig:15-5}
\end{figure}
%-------------------------------------------
\parinterval 针对该问题的一个解决方案是修改学习率的衰减策略,图\ref{fig:15-5}中蓝色的曲线就是修改后的学习率曲线。首先在训练的初期让网络快速地达到学习率的峰值(线性递增),之后每一次将网络从$p$层加深为$2p$层时,都会将当前的学习率值重置到峰值点,再根据训练的步数对其进行相应的衰减。具体的步骤如下:
\begin{itemize}
\vspace{0.5em}
\item 在训练的初期,模型先经历一个学习率预热的过程:
\begin{eqnarray}
lr=d_{model}^{-0.5}\cdot step\_num \cdot warmup\_steps^{-1.5}
\label{eq:15-7}
\end{eqnarray}
这里,$step\_num$表示参数更新的次数,$warmup\_steps$表示预热的更新次数,$d_{model}$表示Transformer模型的隐层大小,$lr$是学习率。
\vspace{0.5em}
\item 在之后的训练过程中,每当把网络堆叠到更深的层数(即进行新一轮的渐进式加深)时,学习率都会重置到峰值,之后进行相应的衰减:
\begin{eqnarray}
lr=d_{model}^{-0.5}\cdot step\_num^{-0.5}
\label{eq:15-8}
\end{eqnarray}
这里$step\_num$代表学习率重置后更新的步数。
\end{itemize}
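\parinterval 结合上面两个公式,学习率重置策略可以用如下的示意函数来描述。其中$d_{model}$和$warmup\_steps$的取值只是示例;另外,为了让学习率在每次重置后恰好回到峰值,这里把衰减计数偏移了$warmup\_steps$步,这一细节属于示意性的假设:
\begin{verbatim}
def lr_with_reset(step, reset_points, d_model=512, warmup_steps=4000):
    # reset_points:每次把网络堆叠加深时的全局更新步数列表
    if step <= warmup_steps:
        # 训练初期:线性预热
        return d_model ** -0.5 * step * warmup_steps ** -1.5
    # 找到最近一次重置的位置(若尚未重置,则从预热结束处开始衰减)
    last = max([r for r in reset_points if r <= step], default=warmup_steps)
    # 重置后按新的步数重新衰减;偏移warmup_steps使学习率恰好回到峰值
    return d_model ** -0.5 * (warmup_steps + step - last) ** -0.5
\end{verbatim}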
\parinterval 综合使用渐进式训练、分组稠密连接、学习率重置策略可以在保证翻译品质不变的前提下,缩减近40\%的训练时间(40层编码器)。同时,加速比伴随着模型的加深与数据集的增大会进一步地扩大。
%----------------------------------------------------------------------------------------
% NEW SUB-SECTION
%----------------------------------------------------------------------------------------
\subsection{深层模型的鲁棒性训练}
\parinterval 伴随着网络的加深,还会面临另外一个比较严峻的问题\ \dash \ 过拟合。由于参数量的增大,深层网络的输入与输出分布之间的差异也会越来越大,同时不同子层之间的{\small\bfnew{相互适应}}\index{相互适应}(Co-adaptation)\index{Co-adaptation}也会更加明显,导致任意子层网络对其他子层的依赖过大。这对于训练阶段是有帮助的,因为不同子层可以协同工作从而更好地拟合训练数据。然而这种方式也降低了模型的泛化能力,即深层网络更容易陷入过拟合问题。
\parinterval 通常,可以使用Dropout来缓解过拟合问题(见{\chapterthirteen})。不幸的是,尽管目前Transformer模型已经使用了多种Dropout手段(如Residual Dropout、Attention Dropout、ReLU Dropout等),过拟合问题在深层网络中仍然存在。从图\ref{fig:15-6}中可以看到,深层网络对比浅层网络在训练集和校验集的困惑度上都有显著的优势,然而网络在训练一段时间后出现校验集困惑度上涨的现象,说明模型已经过拟合于训练数据。
%----------------------------------------------
\begin{figure}[htp]
\centering
\input{./Chapter15/Figures/figure-wmt16}
\caption{浅层网络(左)与深层网络(右)在WMT16英德的校验集与训练集的困惑度}
\label{fig:15-6}
\end{figure}
%-------------------------------------------
\parinterval {\chapterthirteen}提到的Layer Dropout方法可以有效地缓解这个问题。以编码端为例,Layer Dropout的过程可以被描述为:在训练过程中,对自注意力子层或前馈神经网络子层进行随机丢弃,以减少不同子层之间的相互适应。这里选择Pre-Norm结构作为基础架构,其计算方式如下:
\begin{eqnarray}
x_{l+1}=\mathcal{F}(\textrm{LN}(x_l))+x_l
\label{eq:15-9}
\end{eqnarray}
其中$\textrm{LN}( \cdot )$表示层正则化函数,$\mathcal{F}( \cdot )$表示自注意力机制或者前馈神经网络,$x_l$表示第$l$个子层的输入。之后,使用一个掩码$M$(值为0或1)来控制每一个子层是正常计算还是丢弃。于是,该子层的计算公式可以被重写为:
\begin{eqnarray}
x_{l+1}=M \cdot \mathcal{F}(\textrm{LN}(x_l))+x_l
\label{eq:15-10}
\end{eqnarray}
$M=0$代表该子层被丢弃,而$M=1$代表正常进行当前子层的计算。图\ref{fig:15-7}展示了这个方法与标准Pre-Norm结构之间的区别。
%----------------------------------------------
\begin{figure}[htp]
\centering
\input{./Chapter15/Figures/figure-sublayer-skip}
\caption{标准的Pre-Norm结构与基于随机跳跃子层的Pre-Norm结构}
\label{fig:15-7}
\end{figure}
%-------------------------------------------
\parinterval 除此之外,有研究者发现:残差网络中,底层子网络对输入进行抽象所得到的表示对最终的输出有很大的影响,而上层网络则通过对底层表示的不断修正来拟合训练目标\upcite{DBLP:journals/corr/GreffSS16}。该结论同样适用于Transformer模型,比如,在训练中,残差支路以及底层的梯度范数通常比较大,这也间接表明底层网络在整个优化的过程中需要更大的更新。考虑到这个因素,在设计每一个子层被丢弃的概率时可以采用自底向上线性增大的策略,保证底层的网络相比于顶层更容易被保留下来。这里用$L$来代表编码端块的个数,$l$代表当前子层的编号,那么$M$可以通过以下的方式得到:
\begin{eqnarray}
M = \left\{\begin{array}{ll}
0&P \leqslant p_l\\
1&P > p_l
\end{array}\right.
\label{eq:15-11}
\end{eqnarray}
其中,$P$是在$[0,1]$上服从均匀分布的随机变量,$p_l$指第$l$个子层被丢弃的概率,具体计算方式如下:
\begin{eqnarray}
p_l=\frac{l}{2L}\cdot \varphi
\label{eq:15-12}
\end{eqnarray}
这里,$1 \leqslant l \leqslant 2L$,且$\varphi$是预先设定的超参数。
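\parinterval 把掩码$M$与丢弃概率$p_l$结合起来,随机子层跳跃可以用如下的示意代码来实现(PyTorch风格,类名与变量名均为假设;这里只在训练阶段进行随机丢弃):
\begin{verbatim}
import torch
import torch.nn as nn

def drop_prob(l, num_blocks, phi):
    # p_l = l / (2L) * phi,其中l为子层编号(1 <= l <= 2L),L为块的个数
    return l / (2.0 * num_blocks) * phi

class StochasticSublayer(nn.Module):
    # 基于Pre-Norm结构的随机子层跳跃(Layer Dropout)示意实现
    def __init__(self, d_model, sublayer, p_l):
        super().__init__()
        self.norm = nn.LayerNorm(d_model)
        self.sublayer = sublayer   # 对应F(.):自注意力或前馈神经网络
        self.p_l = p_l             # 该子层被丢弃的概率

    def forward(self, x):
        if self.training and torch.rand(1).item() < self.p_l:
            return x               # M=0:丢弃该子层,只保留残差
        return x + self.sublayer(self.norm(x))   # M=1:正常计算
\end{verbatim}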
\parinterval 在Layer Dropout中,一个由$2L$个子层构成的残差网络,其顶层的输出相当于是$2^{2L}$个子网络的聚合结果。随机丢弃其中的$n$个子层,相当于屏蔽掉了经过这些子层的所有路径,使子网络的总体数量降低至$2^{2L-n}$。如图\ref{fig:15-8}所示的残差网络展开图,当有3个子层时,从输入到输出共存在8条路径,当删除子层sublayer2后,从输入到输出的路径则会减少到4条。
%----------------------------------------------
\begin{figure}[htp]
\centering
\input{./Chapter15/Figures/figure-expanded-residual-network}
\caption{Layer Dropout中残差网络的展开图}
\label{fig:15-8}
\end{figure}
%-------------------------------------------
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%% chapter 15------------------------------------------------------
@inproceedings{DBLP:conf/cvpr/YuYR18,
author = {Xin Yu and
Zhiding Yu and
Srikumar Ramalingam},
title = {Learning Strict Identity Mappings in Deep Residual Networks},
pages = {4432--4440},
publisher = {{IEEE} Conference on Computer Vision and Pattern Recognition},
year = {2018}
}
@inproceedings{DBLP:conf/emnlp/ZhangTS19,
author = {Biao Zhang and
Ivan Titov and
Rico Sennrich},
title = {Improving Deep Transformer with Depth-Scaled Initialization and Merged
Attention},
pages = {898--909},
publisher = {Annual Meeting of the Association for Computational Linguistics},
year = {2019}
}
@inproceedings{DBLP:conf/eccv/HeZRS16,
author = {Kaiming He and
Xiangyu Zhang and
Shaoqing Ren and
Jian Sun},
title = {Identity Mappings in Deep Residual Networks},
volume = {9908},
pages = {630--645},
publisher = {European Conference on Computer Vision},
year = {2016}
}
@inproceedings{Ottfairseq,
author = {Myle Ott and
Sergey Edunov and
Alexei Baevski and
Angela Fan and
Sam Gross and
Nathan Ng and
David Grangier and
Michael Auli},
title = {fairseq: {A} Fast, Extensible Toolkit for Sequence Modeling},
pages = {48--53},
publisher = {Annual Meeting of the Association for Computational Linguistics},
year = {2019}
}
@inproceedings{KleinOpenNMT,
author = {Guillaume Klein and
Yoon Kim and
Yuntian Deng and
Jean Senellart and
Alexander M. Rush},
title = {OpenNMT: Open-Source Toolkit for Neural Machine Translation},
pages = {67--72},
publisher = {Annual Meeting of the Association for Computational Linguistics},
year = {2017}
}
@inproceedings{DBLP:conf/acl/WuWXTGQLL19,
author = {Lijun Wu and
Yiren Wang and
Yingce Xia and
Fei Tian and
Fei Gao and
Tao Qin and
Jianhuang Lai and
Tie{-}Yan Liu},
title = {Depth Growing for Neural Machine Translation},
pages = {5558--5563},
publisher = {Annual Meeting of the Association for Computational Linguistics},
year = {2019}
}
@inproceedings{DBLP:conf/cvpr/HuangLMW17,
author = {Gao Huang and
Zhuang Liu and
Laurens van der Maaten and
Kilian Q. Weinberger},
title = {Densely Connected Convolutional Networks},
pages = {2261--2269},
publisher = {{IEEE} Conference on Computer Vision and Pattern Recognition},
year = {2017}
}
@article{DBLP:journals/corr/GreffSS16,
author = {Klaus Greff and
Rupesh Kumar Srivastava and
J{\"{u}}rgen Schmidhuber},
title = {Highway and Residual Networks learn Unrolled Iterative Estimation},
publisher = {International Conference on Learning Representations},
year = {2017}
}
%%%%% chapter 15------------------------------------------------------
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%