\begin{tikzpicture}
\begin{scope}
\node [anchor=center] (node1) at (0,0) {\textbf{Machine translation}, sometimes referred to by the abbreviation \textbf{MT} (not to be };
\node [anchor=north] (node2) at (node1.south) {confused with computer-aided translation, machine-aided human translation or inter};
\node [anchor=north] (node3) at (node2.south) {-active translation), is a subfield of computational linguistics that investigates the};
\node [anchor=north] (node4) at ([xshift=-1.8em]node3.south) {use of software to translate text or speech from one language to another.};
\node [anchor=south] (node5) at ([xshift=-12.8em,yshift=0.5em]node1.north) {\Large{WIKIPEDIA}};
\draw [-,line width=1pt]([xshift=-16.1em]node1.north) -- ([xshift=16.1em]node1.north);
\draw [-,line width=1pt]([xshift=-16.1em,yshift=-9.4em]node1.north) -- ([xshift=16.1em,yshift=-9.4em]node1.north);
\node [anchor=north] (node6) at ([xshift=-11.8em,yshift=-0.8em]node4.south) {\Large{维基百科}};
\node [anchor=north] (node7) at ([yshift=-4.6em]node3.south) {{\small\sffamily\bfnew{机器翻译}}(英语:Machine Translation,经常简写为MT,简称机译或机翻)};
\node [anchor=north] (node8) at ([xshift=-0.1em]node7.south) {属于计算语言学的范畴,其研究借由计算机程序将文字或演说从一种自然};
\node [anchor=north] (node9) at ([xshift=-9.85em]node8.south) {语言翻译成另一种自然语言。};
\begin{pgfonlayer}{background}
{
\node[rectangle,draw=black,inner sep=0.2em,fill=white,drop shadow] [fit =(node1)(node2)(node3)(node4)(node5)(node6)(node7)(node8)(node9)] (remark2) {};
}
\end{pgfonlayer}
\end{scope}
\end{tikzpicture}
\begin{tikzpicture}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%dropout
\node [anchor=north,inner sep=2pt,fill=green!20,minimum height=1.5em,minimum width=3.0em] (a11) at (0,0) {};
\node [anchor=north,inner sep=2pt,fill=green!20,minimum height=1.5em,minimum width=3.0em] (a12) at ([yshift=-0.2em]a11.south) {};
\node [anchor=north,inner sep=2pt,fill=green!20,minimum height=1.5em,minimum width=3.0em] (a13) at ([yshift=-0.2em]a12.south) {};
\node [anchor=north,inner sep=2pt,fill=green!20,minimum height=1.5em,minimum width=3.0em] (a14) at ([yshift=-0.2em]a13.south) {感到};
\node [anchor=north,inner sep=2pt,fill=green!20,minimum height=1.5em,minimum width=3.0em] (a15) at ([yshift=-0.2em]a14.south) {满意};
\node [anchor=south east,inner sep=1pt,fill=black] (pa11) at (a11.south east) {\tiny{\color{white} \textbf{0}}};
\node [anchor=south east,inner sep=1pt,fill=black] (pa12) at (a12.south east) {\tiny{\color{white} \textbf{1}}};
\node [anchor=south east,inner sep=1pt,fill=black] (pa13) at (a13.south east) {\tiny{\color{white} \textbf{2}}};
\node [anchor=south east,inner sep=1pt,fill=black] (pa14) at (a14.south east) {\tiny{\color{white} \textbf{3}}};
\node [anchor=south east,inner sep=1pt,fill=black] (pa15) at (a15.south east) {\tiny{\color{white} \textbf{4}}};
\node [anchor=west,inner sep=2pt,fill=green!20,minimum height=1.5em,minimum width=2.0em] (a21) at ([xshift=1.0em]a11.east) {\footnotesize{P=0.1}};
\node [anchor=north,inner sep=2pt,fill=red!20,minimum height=1.5em,minimum width=2.0em] (a22) at ([yshift=-0.2em]a21.south) {\footnotesize{P=0.1}};
\node [anchor=north,inner sep=2pt,fill=green!20,minimum height=1.5em,minimum width=2.0em] (a23) at ([yshift=-0.2em]a22.south) {\footnotesize{P=0.1}};
\node [anchor=north,inner sep=2pt,fill=green!20,minimum height=1.5em,minimum width=2.0em] (a24) at ([yshift=-0.2em]a23.south) {\footnotesize{P=0.1}};
\node [anchor=north,inner sep=2pt,fill=green!20,minimum height=1.5em,minimum width=2.0em] (a25) at ([yshift=-0.2em]a24.south) {\footnotesize{P=0.1}};
\node [anchor=west,inner sep=2pt] (a31) at ([xshift=0.3em]a23.east) {$\Rightarrow$};
\node [anchor=west,inner sep=2pt,fill=green!20,minimum height=1.5em,minimum width=3.0em] (a41) at ([xshift=2.0em]a21.east) {};
\node [anchor=north,inner sep=2pt,fill=green!20,minimum height=1.5em,minimum width=3.0em] (a42) at ([yshift=-1.94em]a41.south) {};
\node [anchor=north,inner sep=2pt,fill=green!20,minimum height=1.5em,minimum width=3.0em] (a43) at ([yshift=-0.2em]a42.south) {感到};
\node [anchor=north,inner sep=2pt,fill=green!20,minimum height=1.5em,minimum width=3.0em] (a44) at ([yshift=-0.2em]a43.south) {满意};
\node [anchor=south east,inner sep=1pt,fill=black] (pa41) at (a41.south east) {\tiny{\color{white} \textbf{0}}};
\node [anchor=south east,inner sep=1pt,fill=black] (pa42) at (a42.south east) {\tiny{\color{white} \textbf{2}}};
\node [anchor=south east,inner sep=1pt,fill=black] (pa43) at (a43.south east) {\tiny{\color{white} \textbf{3}}};
\node [anchor=south east,inner sep=1pt,fill=black] (pa44) at (a44.south east) {\tiny{\color{white} \textbf{4}}};
\node [anchor=south,inner sep=2pt,minimum height=1.5em,minimum width=3.0em] (a10) at (a11.north) {\scriptsize{source sentence}};
\node [anchor=south,inner sep=2pt,minimum height=1.5em,minimum width=3.0em] (a20) at (a21.north) {\small{P}};
\node [anchor=south,inner sep=2pt] (a30) at (a41.north) {\scriptsize{word dropping}};
\node [anchor=south,inner sep=2pt] (a30-2) at (a30.north) {\scriptsize{result of random}};
\node [anchor=north,inner sep=2pt] (pos1) at ([xshift=0.5em,yshift=-0.5em]a25.south) {\scriptsize{(a) Adding noise by randomly dropping words}};
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%mask
\node [anchor=west,inner sep=2pt,fill=blue!20,minimum height=1.5em,minimum width=3.0em] (b11) at ([xshift=2.0em]a41.east) {};
\node [anchor=north,inner sep=2pt,fill=blue!20,minimum height=1.5em,minimum width=3.0em] (b12) at ([yshift=-0.2em]b11.south) {};
\node [anchor=north,inner sep=2pt,fill=blue!20,minimum height=1.5em,minimum width=3.0em] (b13) at ([yshift=-0.2em]b12.south) {};
\node [anchor=north,inner sep=2pt,fill=blue!20,minimum height=1.5em,minimum width=3.0em] (b14) at ([yshift=-0.2em]b13.south) {感到};
\node [anchor=north,inner sep=2pt,fill=blue!20,minimum height=1.5em,minimum width=3.0em] (b15) at ([yshift=-0.2em]b14.south) {满意};
\node [anchor=south east,inner sep=1pt,fill=black] (pb11) at (b11.south east) {\tiny{\color{white} \textbf{0}}};
\node [anchor=south east,inner sep=1pt,fill=black] (pb12) at (b12.south east) {\tiny{\color{white} \textbf{1}}};
\node [anchor=south east,inner sep=1pt,fill=black] (pb13) at (b13.south east) {\tiny{\color{white} \textbf{2}}};
\node [anchor=south east,inner sep=1pt,fill=black] (pb14) at (b14.south east) {\tiny{\color{white} \textbf{3}}};
\node [anchor=south east,inner sep=1pt,fill=black] (pb15) at (b15.south east) {\tiny{\color{white} \textbf{4}}};
\node [anchor=west,inner sep=2pt,fill=blue!20,minimum height=1.5em,minimum width=2.0em] (b21) at ([xshift=1.0em]b11.east) {\footnotesize{P=0.1}};
\node [anchor=north,inner sep=2pt,fill=blue!20,minimum height=1.5em,minimum width=2.0em] (b22) at ([yshift=-0.2em]b21.south) {\footnotesize{P=0.1}};
\node [anchor=north,inner sep=2pt,fill=red!20,minimum height=1.5em,minimum width=2.0em] (b23) at ([yshift=-0.2em]b22.south) {\footnotesize{P=0.1}};
\node [anchor=north,inner sep=2pt,fill=blue!20,minimum height=1.5em,minimum width=2.0em] (b24) at ([yshift=-0.2em]b23.south) {\footnotesize{P=0.1}};
\node [anchor=north,inner sep=2pt,fill=blue!20,minimum height=1.5em,minimum width=2.0em] (b25) at ([yshift=-0.2em]b24.south) {\footnotesize{P=0.1}};
\node [anchor=west,inner sep=2pt] (b31) at ([xshift=0.3em]b23.east) {$\Rightarrow$};
\node [anchor=west,inner sep=2pt,fill=blue!20,minimum height=1.5em,minimum width=3.0em] (b41) at ([xshift=2.0em]b21.east) {};
\node [anchor=north,inner sep=2pt,fill=blue!20,minimum height=1.5em,minimum width=3.0em] (b42) at ([yshift=-0.2em]b41.south) {};
\node [anchor=north,inner sep=2pt,fill=blue!20,minimum height=1.5em,minimum width=3.0em] (b43) at ([yshift=-0.2em]b42.south) {\scriptsize{[mask]}};
\node [anchor=north,inner sep=2pt,fill=blue!20,minimum height=1.5em,minimum width=3.0em] (b44) at ([yshift=-0.2em]b43.south) {感到};
\node [anchor=north,inner sep=2pt,fill=blue!20,minimum height=1.5em,minimum width=3.0em] (b45) at ([yshift=-0.2em]b44.south) {满意};
\node [anchor=south east,inner sep=1pt,fill=black] (pb41) at (b41.south east) {\tiny{\color{white} \textbf{0}}};
\node [anchor=south east,inner sep=1pt,fill=black] (pb42) at (b42.south east) {\tiny{\color{white} \textbf{1}}};
\node [anchor=south east,inner sep=1pt,fill=black] (pb43) at (b43.south east) {\tiny{\color{white} \textbf{2}}};
\node [anchor=south east,inner sep=1pt,fill=black] (pb44) at (b44.south east) {\tiny{\color{white} \textbf{3}}};
\node [anchor=south east,inner sep=1pt,fill=black] (pb45) at (b45.south east) {\tiny{\color{white} \textbf{4}}};
\node [anchor=south,inner sep=2pt,minimum height=1.5em,minimum width=3.0em] (b10) at (b11.north) {\scriptsize{source sentence}};
\node [anchor=south,inner sep=2pt,minimum height=1.5em,minimum width=3.0em] (b20) at (b21.north) {\small{P}};
\node [anchor=south,inner sep=2pt] (b30) at (b41.north) {\scriptsize{word masking}};
\node [anchor=south,inner sep=2pt] (b30-2) at (b30.north) {\scriptsize{result of random}};
\node [anchor=north,inner sep=2pt] (pos2) at ([xshift=0.5em,yshift=-0.5em]b25.south) {\scriptsize{(b) Adding noise by randomly masking words}};
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%shuffle the source sentence
\node [anchor=north,inner sep=2pt,fill=yellow!20,minimum height=1.5em,minimum width=3.0em] (c11) at ([yshift=-4.5em]a15.south) {};
\node [anchor=north,inner sep=2pt,fill=yellow!20,minimum height=1.5em,minimum width=3.0em] (c12) at ([yshift=-0.2em]c11.south) {};
\node [anchor=north,inner sep=2pt,fill=yellow!20,minimum height=1.5em,minimum width=3.0em] (c13) at ([yshift=-0.2em]c12.south) {};
\node [anchor=north,inner sep=2pt,fill=yellow!20,minimum height=1.5em,minimum width=3.0em] (c14) at ([yshift=-0.2em]c13.south) {感到};
\node [anchor=north,inner sep=2pt,fill=yellow!20,minimum height=1.5em,minimum width=3.0em] (c15) at ([yshift=-0.2em]c14.south) {满意};
\node [anchor=south east,inner sep=1pt,fill=black] (pc11) at (c11.south east) {\tiny{\color{white} \textbf{0}}};
\node [anchor=south east,inner sep=1pt,fill=black] (pc12) at (c12.south east) {\tiny{\color{white} \textbf{1}}};
\node [anchor=south east,inner sep=1pt,fill=black] (pc13) at (c13.south east) {\tiny{\color{white} \textbf{2}}};
\node [anchor=south east,inner sep=1pt,fill=black] (pc14) at (c14.south east) {\tiny{\color{white} \textbf{3}}};
\node [anchor=south east,inner sep=1pt,fill=black] (pc15) at (c15.south east) {\tiny{\color{white} \textbf{4}}};
\node [anchor=west,inner sep=2pt] (c21) at ([xshift=0.3em]c11.east) {\footnotesize{+}};
\node [anchor=west,inner sep=2pt] (c22) at ([xshift=0.3em]c12.east) {\footnotesize{+}};
\node [anchor=west,inner sep=2pt] (c23) at ([xshift=0.3em]c13.east) {\footnotesize{+}};
\node [anchor=west,inner sep=2pt] (c24) at ([xshift=0.3em]c14.east) {\footnotesize{+}};
\node [anchor=west,inner sep=2pt] (c25) at ([xshift=0.3em]c15.east) {\footnotesize{+}};
\node [anchor=west,inner sep=2pt,fill=yellow!20,minimum height=1.5em] (c31) at ([xshift=0.352em]c21.east) {\footnotesize{2.54}};
\node [anchor=north,inner sep=2pt,fill=yellow!20,minimum height=1.5em] (c32) at ([yshift=-0.2em]c31.south) {\footnotesize{0.63}};
\node [anchor=north,inner sep=2pt,fill=yellow!20,minimum height=1.5em] (c33) at ([yshift=-0.2em]c32.south) {\footnotesize{1.77}};
\node [anchor=north,inner sep=2pt,fill=yellow!20,minimum height=1.5em] (c34) at ([yshift=-0.2em]c33.south) {\footnotesize{1.33}};
\node [anchor=north,inner sep=2pt,fill=yellow!20,minimum height=1.5em] (c35) at ([yshift=-0.2em]c34.south) {\footnotesize{2.15}};
\node [anchor=west,inner sep=2pt] (c41) at ([xshift=0.55em]c31.east) {\footnotesize{=}};
\node [anchor=west,inner sep=2pt] (c42) at ([xshift=0.55em]c32.east) {\footnotesize{=}};
\node [anchor=west,inner sep=2pt] (c43) at ([xshift=0.55em]c33.east) {\footnotesize{=}};
\node [anchor=west,inner sep=2pt] (c44) at ([xshift=0.55em]c34.east) {\footnotesize{=}};
\node [anchor=west,inner sep=2pt] (c45) at ([xshift=0.55em]c35.east) {\footnotesize{=}};
\node [anchor=west,inner sep=2pt,fill=yellow!20,minimum height=1.5em,minimum width=3.0em] (c51) at ([xshift=0.55em]c41.east) {\footnotesize{$S_{0}=2.54$}};
\node [anchor=north,inner sep=2pt,fill=yellow!20,minimum height=1.5em,minimum width=3.0em] (c52) at ([yshift=-0.2em]c51.south) {\footnotesize{$S_{1}=1.63$}};
\node [anchor=north,inner sep=2pt,fill=yellow!20,minimum height=1.5em,minimum width=3.0em] (c53) at ([yshift=-0.2em]c52.south) {\footnotesize{$S_{2}=3.77$}};
\node [anchor=north,inner sep=2pt,fill=yellow!20,minimum height=1.5em,minimum width=3.0em] (c54) at ([yshift= -0.2em]c53.south) {\footnotesize{$S_{3}=4.33$}};
\node [anchor=north,inner sep=2pt,fill=yellow!20,minimum height=1.5em,minimum width=3.0em] (c55) at ([yshift=-0.2em]c54.south) {\footnotesize{$S_{4}=6.15$}};
\node [anchor=west,inner sep=2pt,fill=yellow!20,minimum height=1.5em,minimum width=3.0em] (c61) at ([xshift=3.72em]c51.east) {\footnotesize{$S_{0}^{'}=1.63$}};
\node [anchor=north,inner sep=2pt,fill=yellow!20,minimum height=1.5em,minimum width=3.0em] (c62) at ([yshift=-0.2em]c61.south) {\footnotesize{$S_{1}^{'}=2.54$}};
\node [anchor=north,inner sep=2pt,fill=yellow!20,minimum height=1.5em,minimum width=3.0em] (c63) at ([yshift=-0.2em]c62.south) {\footnotesize{$S_{2}^{'}=3.77$}};
\node [anchor=north,inner sep=2pt,fill=yellow!20,minimum height=1.5em,minimum width=3.0em] (c64) at ([yshift=-0.2em]c63.south) {\footnotesize{$S_{3}^{'}=4.33$}};
\node [anchor=north,inner sep=2pt,fill=yellow!20,minimum height=1.5em,minimum width=3.0em] (c65) at ([yshift=-0.2em]c64.south) {\footnotesize{$S_{4}^{'}=6.15$}};
\node [anchor=north,inner sep=2pt] (c71) at ([yshift=-12.3em]b31.south) {$\Rightarrow$};
\node [anchor=west,inner sep=2pt,fill=red!20,minimum height=1.5em,minimum width=3.0em] (c81) at ([xshift=2.0em]c61.east) {};
\node [anchor=north,inner sep=2pt,fill=red!20,minimum height=1.5em,minimum width=3.0em] (c82) at ([yshift=-0.2em]c81.south) {};
\node [anchor=north,inner sep=2pt,fill=yellow!20,minimum height=1.5em,minimum width=3.0em] (c83) at ([yshift=-0.2em]c82.south) {};
\node [anchor=north,inner sep=2pt,fill=yellow!20,minimum height=1.5em,minimum width=3.0em] (c84) at ([yshift=-0.2em]c83.south) {感到};
\node [anchor=north,inner sep=2pt,fill=yellow!20,minimum height=1.5em,minimum width=3.0em] (c85) at ([yshift=-0.2em]c84.south) {满意};
\node [anchor=south east,inner sep=1pt,fill=black] (pc81) at (c81.south east) {\tiny{\color{white} \textbf{0}}};
\node [anchor=south east,inner sep=1pt,fill=black] (pc82) at (c82.south east) {\tiny{\color{white} \textbf{1}}};
\node [anchor=south east,inner sep=1pt,fill=black] (pc83) at (c83.south east) {\tiny{\color{white} \textbf{2}}};
\node [anchor=south east,inner sep=1pt,fill=black] (pc84) at (c84.south east) {\tiny{\color{white} \textbf{3}}};
\node [anchor=south east,inner sep=1pt,fill=black] (pc85) at (c85.south east) {\tiny{\color{white} \textbf{4}}};
\draw [->,dashed](c51.east)--(c62.west);
\draw [->,dashed](c52.east)--(c61.west);
\draw [->,dashed](c53.east)--(c63.west);
\draw [->,dashed](c54.east)--(c64.west);
\draw [->,dashed](c55.east)--(c65.west);
\node [anchor=south,inner sep=2pt,minimum height=1.5em,minimum width=3.0em] (c10) at (c11.north) {\scriptsize{source sentence}};
\node [anchor=south,inner sep=2pt,minimum height=1.5em,minimum width=3.0em] (c30) at (c31.north) {\small{n=3}};
\node [anchor=south,inner sep=2pt,minimum height=1.5em,minimum width=3.0em] (c50) at (c51.north) {\small{S}};
\node [anchor=south,inner sep=2pt] (c60) at (c61.north) {\scriptsize{ascending order}};
\node [anchor=south,inner sep=2pt] (c60-2) at (c60.north) {\scriptsize{sorted in}};
\node [anchor=south,inner sep=2pt] (c80) at (c81.north) {\scriptsize{shuffling}};
\node [anchor=south,inner sep=2pt] (c80-2) at (c80.north) {\scriptsize{result after}};
\node [anchor=north,inner sep=2pt] (pos3) at ([xshift=2.4em,yshift=-0.5em]c55.south) {\scriptsize{(c) Adding noise by slightly shuffling word order}};
\end{tikzpicture}
%----------------------------------------------------------------------------------------
\chapter{Low-resource Neural Machine Translation}
\parinterval Compared with traditional statistical machine translation, neural machine translation achieves significantly better performance, but this comes with a dependence on massive numbers of bilingual training sentence pairs. Obtaining a high-quality NMT model requires a large amount of training data, yet bilingual sentence pairs are relatively expensive to acquire, and in many scenarios sufficient training data is simply unavailable. For widely used languages with large speaker populations, such as Chinese and English, large numbers of bilingual sentence pairs between them are easy to obtain; we call these rich-resource languages. For minority languages or the languages of smaller countries, such as Urdu or Fijian, bilingual sentence pairs with other languages are hard to obtain; we call these low-resource languages. Of the more than 6,000 languages in the world today, only a small fraction are rich-resource, and the vast majority are low-resource. Even within rich-resource language pairs, bilingual sentence pairs remain scarce in certain domains such as physics or astronomy. For these reasons, low-resource machine translation has long attracted the attention of researchers.
{\red{(Overview of the methods, to be added last)}}
%----------------------------------------------------------------------------------------
% NEW SECTION
%----------------------------------------------------------------------------------------
\section{The Use of Monolingual Data}

\parinterval In the era of statistical machine translation, training a language model on monolingual data was a key step in building an MT system, and a good language model usually brought performance gains. This effect seems less pronounced in neural machine translation, since most NMT paradigms do not require large-scale monolingual data to help the system, and the language model is not even an independent module. On one hand, the decoder of an NMT system itself plays the role of a language model; on the other hand, the growth of bilingual data allows the translation model to capture the regularities of the target language better. Bilingual data, however, is always limited, and in many scenarios the amount of monolingual data far exceeds it. For example, in many sub-domains of patent translation, bilingual data is very limited while large amounts of in-domain monolingual data exist. Putting such monolingual data to work is clearly an attractive option. Using monolingual data in neural machine translation raises two questions:
\begin{itemize}
\vspace{0.5em}
\item What kind of knowledge can be learned from monolingual data?
\vspace{0.5em}
\item How can this knowledge be integrated into neural machine translation?
\vspace{0.5em}
\end{itemize}
\parinterval {\red{这里可能需要一段开头}}
下面将从数据增强、基于语言模型的单语数据使用、翻译建模等方面对这两个问题展开讨论。
%----------------------------------------------------------------------------------------
% NEW SUB-SECTION
%----------------------------------------------------------------------------------------
\subsection{Data Augmentation}
\parinterval {\small\bfnew{Data augmentation}} alleviates the scarcity of bilingual data by using monolingual data to produce pseudo bilingual data, thereby increasing the amount of training data. Common data augmentation methods include back-translation, adding noise, and bilingual data mining.
\subsubsection{1. Back-translation}
\parinterval {\small\bfnew{Back-translation}} (BT) is currently the most widely used data augmentation method in machine translation. Its main idea is to use a target-to-source model (the reverse translation model) to generate pseudo bilingual sentence pairs for training the source-to-target model (the forward translation model). For example, suppose the goal is to train an English-to-Chinese model: one first trains a Chinese-to-English model on the bilingual data, then translates monolingual Chinese sentences into English, obtaining a large number of pseudo pairs of generated English and real Chinese. These pseudo pairs are then mixed with the real bilingual pairs to train the English-to-Chinese NMT model.
Back-translation is model-agnostic: only a reverse translation model needs to be trained to exploit monolingual data simply and effectively, so it has also been widely adopted in industry. Figure\ref{fig:16-1-xc} sketches the back-translation procedure.
%----------------------------------------------
\begin{figure}[htp]
\centering
\caption{A brief sketch of the back-translation procedure}
\label{fig:16-1-xc}
\end{figure}
%-------------------------------------------
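\parinterval As an illustration, the following minimal Python sketch shows the data flow of back-translation. The callable \texttt{reverse\_model} is a hypothetical stand-in for a trained target-to-source translation model; the sketch only demonstrates how pseudo pairs are produced and mixed with the real ones.

\begin{verbatim}
import random

def back_translate(reverse_model, target_mono):
    # reverse_model: any callable mapping a target sentence to a
    # (pseudo) source sentence; a stand-in for a trained NMT model
    return [(reverse_model(t), t) for t in target_mono]

# toy stand-in for the target-to-source model
reverse_model = lambda sent: "<pseudo-src> " + sent

real_pairs = [("wo hen manyi", "i am satisfied")]
target_mono = ["the weather is nice", "he felt happy"]

pseudo_pairs = back_translate(reverse_model, target_mono)
train_data = real_pairs + pseudo_pairs  # mix real and pseudo pairs
random.shuffle(train_data)              # train the forward model on this
print(train_data)
\end{verbatim}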
\parinterval Generally speaking, the better the reverse model, the higher the quality of the generated source-language translations, and the closer the distribution of the pseudo data is to that of the real data. In practice, however, even very simple strategies bring gains. For instance, for some low-resource languages, pseudo data built by merely copying target-language sentences to the source side already benefits the model\upcite{DBLP:conf/wmt/CurreyBH17}. Compared with such simple construction strategies, back-translating target-language monolingual data yields pseudo data of higher quality, mainly for two reasons:
\begin{itemize}
\vspace{0.5em}
\item The source side of a pseudo pair is a model-generated translation: it preserves the translation correspondence between the two languages while containing some noise compared to real data. Training on such pairs teaches the NMT model how to handle noisy input and improves its robustness;
\vspace{0.5em}
\item The target side of a pseudo pair is a real sentence, which guarantees the fluency of the NMT model's output, so the translations better match human reading habits.
\vspace{0.5em}
\end{itemize}
\parinterval Researchers have analyzed in detail how back-translation can generate pseudo bilingual data that helps the model most. A common view is that the better the reverse model, the higher the quality of the pseudo data and the larger the gain for the forward model. One problem back-translation faces is that the reverse translation model is trained only on the limited bilingual data, so the quality of the generated source-side pseudo data is hard to guarantee. To address this, {\small\sffamily\bfnew{iterative back-translation}}\index{Iterative Back Translation}\upcite{DBLP:conf/aclnmt/HoangKHC18} uses monolingual data on both the source and the target side, repeatedly applying back-translation to improve the forward and reverse models. Figure\ref{fig:16-2-xc} shows the framework of iterative back-translation. First, a forward model is trained on the bilingual data; then source-language monolingual data is back-translated into pseudo bilingual data that improves the reverse model; finally, pseudo bilingual data generated by the reverse model from target-language monolingual data improves the forward model. This back-and-forth process forms a closed loop and can be repeated until neither model improves any further.
%----------------------------------------------
\begin{figure}[htp]
\centering
\input{./Chapter16/Figures/figure-example-of-iterative-back-translation}
\caption{The procedure of iterative back-translation}
\label{fig:16-2-xc}
\end{figure}
%-------------------------------------------
\parinterval Researchers have found, however, that in low-resource languages, where bilingual data is scarce, high-quality pseudo bilingual data helps the model more, whereas in rich-resource languages, adding some noise to the back-translated source sentences to increase the diversity of the translations can actually work better; sampling and top-k decoding are commonly used for this\upcite{DBLP:conf/emnlp/EdunovOAG18}.

\parinterval Back-translation usually decodes with beam search, which considers only the most probable words when generating each word. The resulting translations are of higher quality, but they consist mostly of high-frequency words and lack diversity, so the pseudo data can hardly cover the true data distribution accurately\upcite{DBLP:conf/icml/OttAGR18}. Sampling-based decoding instead draws from the whole vocabulary according to the predicted probabilities at every step, so any word in the vocabulary may be selected, which makes the output more diverse but clearly degrades translation quality and fluency. Top-k decoding is a compromise between beam search and sampling: at each step it samples among the k words with the highest predicted probability, improving diversity while preserving the accuracy of the translation. Since sampling and top-k decoding introduce diversity only into the generated source sentences, they also increase how often the model is trained on sentences containing low-frequency words or noise. The three decoding methods are illustrated in Figure\ref{fig:16-3-xc}.
%----------------------------------------------
\begin{figure}[htp]
\centering
\input{./Chapter16/Figures/figure-contrast-diagram-of-beam-search-topk-and-sampling}
\caption{Three decoding methods for back-translation}
\label{fig:16-3-xc}
\end{figure}
%-------------------------------------------
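\parinterval The following sketch, under the assumption that the model's next-word scores are given as a vector of logits, illustrates a single step of top-k decoding: beam search would deterministically take the arg-max, whereas top-k decoding samples randomly among the k most probable words.

\begin{verbatim}
import numpy as np

def topk_sample(logits, k, rng):
    # keep only the k most probable words, renormalise, then sample
    idx = np.argsort(logits)[-k:]
    probs = np.exp(logits[idx] - logits[idx].max())
    probs /= probs.sum()
    return rng.choice(idx, p=probs)

rng = np.random.default_rng(0)
logits = np.array([2.0, 1.0, 0.5, -1.0, 3.0])
print(topk_sample(logits, k=3, rng=rng))  # greedy would always pick 4
\end{verbatim}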
\parinterval Similarly to back-translation, source-language monolingual data can also be translated by a forward model trained on the bilingual data to obtain target-language translations, yielding pseudo data for {\small\sffamily\bfnew{forward translation}}\index{Forward Translation}. In contrast to back-translation, the source side of forward-translation pseudo data is real while the target side is generated, so this pseudo data contributes little to the fluency of the output and may even harm it; its main effect is to enrich the source-language representations in the training data and strengthen the encoder of the translation model. In most cases, the gains brought by forward-translation pseudo data are weaker than those of back-translation.
\subsubsection{2. Modifying Bilingual Data}

{\red{(This method does not use monolingual data; is it appropriate in this section, or should the section title be changed?)}}
\parinterval了回译方法,另外一种常用的数据增强技术是对真实双语数据加入一些噪声,得到伪双语数据,然后将二者混合来训练神经机器翻译模型。常用的加噪方法包括按照一定的概率将源语句子中的部分词随机丢弃或替换为其它词、轻微打乱源语句子的顺序。{\red{如图X所示。(这里可以加个图描述一下三种方法)}}
\parinterval利用回译方法生成伪数据外,另外一种数据增强技术是在原始的双语数据上进行操作得到伪数据,常用的方法包括加噪、词替换等。
\parinterval 这种对训练数据加噪的方法仅在源语句子上进行操作,和回译方法相似,既保证了目标句子的准确性,又可以提高训练数据的量级。加噪方法也被用于训练降噪自编码器,在无监督机器翻译模型中也得到了广泛应用,详细方法可以参考xxx小节。
\parinterval 加噪是指在保证句子整体语义的情况下,对原始的双语数据适当加入一些噪声,从而生成伪双语数据来增加训练数据量。常用的加噪方法主要有以下三种:
%(Target-side rare-word replacement [Data Augmentation for Low-Resource Neural Machine Translation] uses a language model to replace words in the target sentence with low-frequency words from the vocabulary, improving the model's prediction accuracy for rare words.
%Word dropout [Edinburgh Neural Machine Translation Systems for WMT 16] randomly selects some words in the source or target sentence with a certain probability and sets their word embeddings to 0.
%Word dropout follows the same idea as dropout: it improves the model's robustness and helps avoid overfitting.)
\begin{itemize}
\vspace{0.5em}
\item Word dropping: each word in the sentence is dropped with probability $\funp{P}_{\rm{drop}}$.
\vspace{0.5em}
\item Word masking: each word in the sentence is replaced by an extra [mask] token with probability $\funp{P}_{\rm{mask}}$.
\vspace{0.5em}
\item Shuffling: the positions of some nearby words in the sentence are swapped, slightly perturbing the word order.
\vspace{0.5em}
\end{itemize}
\ref{fig:16-4-xc}展示了三种加噪方法的示例。这里,$\funp{P}_{\rm{drop}}$$\funp{P}_{\rm{mask}}$均设置为0.1,表示每个词有$10\%$的概率被丢弃或屏蔽,乱序的操作略微复杂,一种实现方法是,通过一个数字来表示每个词在句子中的位置,如“我”是第零个词,“你”是第二个词,然后,在每个位置生成一个$0$$n$的随机数,n一般设置为3,然后将每个词的位置数和对应的随机数相加,即图中的$S$。对$S$按照从小到大排序,根据排序后每个位置的索引从原始句子中选择对应的词,从而得到最终打乱顺序后的结果。比如,在排序后,$S_1$的值小于$S_0$,其余词则保持递增顺序,则将原始句子中的第零个词和第一个词的顺序进行交换,其他词保持不变。
%----------------------------------------------
\begin{figure}[htp]
\centering
\input{./Chapter16/Figures/figure-three-common-methods-of-adding-noise}
\caption{Three methods of adding noise}
\label{fig:16-4-xc}
\end{figure}
%-------------------------------------------
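\parinterval For concreteness, below is a minimal Python sketch of the three noise operations, following the settings of Figure\ref{fig:16-4-xc} ($\funp{P}_{\rm{drop}}=\funp{P}_{\rm{mask}}=0.1$, $n=3$); the example sentence and its tokenization are simplified assumptions.

\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)

def word_drop(words, p_drop=0.1):
    kept = [w for w in words if rng.random() >= p_drop]
    return kept or words  # never drop the whole sentence

def word_mask(words, p_mask=0.1):
    return [w if rng.random() >= p_mask else "[mask]" for w in words]

def local_shuffle(words, n=3):
    # add a uniform offset in [0, n) to each position index, re-sort
    scores = np.arange(len(words)) + rng.uniform(0, n, len(words))
    return [words[i] for i in np.argsort(scores)]

src = ["我", "对", "你", "感到", "满意"]
print(word_drop(src), word_mask(src), local_shuffle(src))
\end{verbatim}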
\parinterval As with back-translation, noise is generally added only to the source sentence, which preserves the fluency of the target sentence while increasing the amount and the diversity of the training data. Adding noise is also used to train denoising autoencoders and is widely applied in unsupervised machine translation; see Section xxx for details.
\parinterval Replacing a word in a sentence with another word does not necessarily harm the sentence's plausibility or fluency. Take the sentence ``我出去玩。'': replacing ``我'' with ``你'', ``他'' or ``我们'', or replacing ``玩'' with ``骑车'', ``学习'' or ``吃饭'', changes the meaning but leaves the sentence perfectly well-formed. Word replacement substitutes some words in the bilingual data with other words from the vocabulary, increasing the diversity of the training data while keeping the sentences semantically or syntactically acceptable.
\parinterval One replacement strategy targets rare words on the target side\upcite{DBLP:conf/acl/FadaeeBM17a}. Because rare words occur only a few times, they easily suffer from insufficient training, and the model cannot predict them accurately. For a bilingual sentence pair $(\mathbi{s}, \mathbi{t})$ with $\mathbi{s} = (s_1, \dots, s_i, \dots, s_n)$ and $\mathbi{t} = (t_1, \dots, t_j, \dots, t_m)$, suppose the source sentence $\mathbi{s}$ is chosen for replacement. At position $i$ of the source sentence, forward candidate rare words are obtained from the preceding text with a forward language model:
\begin{eqnarray}
\overrightarrow{C} = \{s_i^{'} \in V_R: \funp{TopK} \ \funp{P}_{\rm{ForwardLM}}(s_i^{'} | s_1^{i-1})\}
\end{eqnarray}
\parinterval Here $V_R$ is the rare-word vocabulary, which can be defined as the words occurring least often in the training data or fewer times than some threshold, and $\funp{TopK}$ selects the K words with the highest predicted probability; those among them that belong to $V_R$ are added to the forward candidates. Likewise, backward candidate rare words are obtained from the following text with a backward language model:
\begin{eqnarray}
\overleftarrow{C} = \{s_i^{'} \in V_R: \funp{TopK} \ \funp{P}_{\rm{BackwardLM}}(s_i^{'} | s_{i+1}^{n})\}
\end{eqnarray}
\parinterval The words appearing in both the forward and the backward candidate sets are then selected:
\begin{eqnarray}
{C} = \{s_i^{'} | s_i^{'} \in \overrightarrow{C} \wedge s_i^{'} \in \overleftarrow{C}\}
\end{eqnarray}
\parinterval Both language models can be trained on massive monolingual data, so the rare words selected this way keep the modified sentence syntactically well-formed and fluent. Finally, word alignment locates the position $j$ in the target sentence that corresponds to the replaced word, and with a translation dictionary or a similar resource, $t_j$ in the target sentence is replaced by $t_j^{'}$, the translation of $s_i^{'}$, yielding a pseudo bilingual sentence pair.
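\parinterval The candidate-intersection step of this rare-word replacement can be sketched as follows; the two dictionaries are toy stand-ins for the forward and backward language models, and the word-alignment and dictionary lookup on the target side are omitted.

\begin{verbatim}
def topk_rare(lm_scores, rare_vocab, k=5):
    # lm_scores: word -> probability predicted by a language model
    best = sorted(lm_scores, key=lm_scores.get, reverse=True)[:k]
    return {w for w in best if w in rare_vocab}

rare_vocab = {"欣喜", "惬意"}
fwd = {"满意": 0.5, "欣喜": 0.2, "高兴": 0.15, "惬意": 0.1}
bwd = {"欣喜": 0.4, "满意": 0.3, "高兴": 0.2}

# words proposed by both directions
print(topk_rare(fwd, rare_vocab) & topk_rare(bwd, rare_vocab))
\end{verbatim}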
\subsubsection{3. Bilingual Sentence Pair Mining}
\parinterval Mining usable bilingual sentence pairs from comparable corpora is another common data augmentation method. A comparable corpus contains source- and target-language texts that, although not exact translations of each other, carry rich bilingual correspondences from which usable sentence pairs can be extracted for training. Compared with annotated bilingual corpora, comparable corpora are relatively easy to obtain, e.g., news reports on the same event in several languages, Wikipedia entries in multiple languages, or books translated into several languages, as in the Wikipedia entries shown in Figure\ref{fig:16-5-xc}.
%----------------------------------------------
\begin{figure}[htp]
\centering
\input{./Chapter16/Figures/figure-examples-of-comparable-corpora}
\caption{Comparable corpora from Wikipedia}
\label{fig:16-5-xc}
\end{figure}
%-------------------------------------------
\parinterval Comparable corpora are generally crawled from web pages, so their content is rather messy and may contain a substantial proportion of noise such as HTML markup and mis-encoded characters. Thorough data cleaning is therefore needed first to obtain a clean comparable corpus, from which usable bilingual sentence pairs are then extracted. Traditional extraction methods rely on models or bilingual dictionaries, for example computing the word overlap or the BLEU score between sentences of the two languages\upcite{finding2006adafre,method2008keiji}, or using ranking models or binary classifiers to judge how likely a target-language sentence and a source-language sentence are mutual translations\upcite{DBLP:journals/coling/MunteanuM05,DBLP:conf/naacl/SmithQT10}.
\parinterval Another effective approach extracts pairs based on the sentence vectors of the two languages. First, for every sentence in each language, a sentence vector carrying its semantics is computed, for example by a weighted average of word embeddings; then the cosine similarity between every source sentence and every target sentence is computed, and pairs whose similarity exceeds a threshold are taken as usable bilingual pairs\upcite{DBLP:conf/emnlp/WuZHGQLL19}. However, word embeddings trained separately for different languages may live in different representation spaces, so the resulting sentence vectors cannot measure the similarity of two sentences across languages. To solve this, cross-lingual word embeddings in a shared representation space are generally used to represent the words of both languages: in such embeddings, words with the same meaning in different languages have highly similar vectors, so the derived sentence vectors can measure whether two sentences express similar meanings. For details on cross-lingual word embeddings, see xxx (the section on bilingual lexicon induction).
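\parinterval The sketch below illustrates similarity-based mining with toy two-dimensional vectors standing in for cross-lingual word embeddings; real systems use high-dimensional embeddings and approximate nearest-neighbor search instead of the quadratic loop shown here.

\begin{verbatim}
import numpy as np

def sent_vec(words, emb):
    # average of (cross-lingual) word embeddings as the sentence vector
    return np.mean([emb[w] for w in words], axis=0)

def mine_pairs(src_vecs, tgt_vecs, threshold=0.8):
    pairs = []
    for i, s in enumerate(src_vecs):
        for j, t in enumerate(tgt_vecs):
            cos = s @ t / (np.linalg.norm(s) * np.linalg.norm(t))
            if cos > threshold:
                pairs.append((i, j, cos))
    return pairs

emb = {"猫": np.array([0.9, 0.1]), "cat": np.array([1.0, 0.0])}
print(mine_pairs([sent_vec(["猫"], emb)], [sent_vec(["cat"], emb)]))
\end{verbatim}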
(Further reading)
\parinterval Beyond this, much work has studied data augmentation in greater depth: exploring how to use source-language monolingual data in NMT\upcite{DBLP:conf/emnlp/ZhangZ16}; selecting the monolingual data that yields the largest gains when generating pseudo data\upcite{DBLP:conf/emnlp/FadaeeM18,DBLP:conf/nlpcc/XuLXLLXZ19}; distinguishing real bilingual data from back-translated pseudo data with special tags\upcite{DBLP:conf/wmt/CaswellCG19}; dynamically selecting and weighting training data during back-translation\upcite{DBLP:journals/corr/abs200403672}; augmenting data with target-side monolingual data and a related rich-resource language\upcite{DBLP:conf/acl/XiaKAN19}; building pseudo bilingual data by replacing randomly chosen source or target words with random words from the vocabulary\upcite{DBLP:conf/emnlp/WangPDN18}; replacing the embedding of a randomly chosen word with a weighted fusion of the embeddings of several semantically similar words\upcite{DBLP:conf/acl/GaoZWXQCZL19}; quantifying the confidence of predictions via model uncertainty to improve back-translation\upcite{DBLP:conf/emnlp/WangLWLS19}; exploring how to exploit very large-scale monolingual data\upcite{DBLP:conf/emnlp/WuWXQLL19}; and analyzing data augmentation theoretically\upcite{DBLP:conf/emnlp/LiLHZZ19}.
[On integrating a language model into neural machine translation]
%----------------------------------------------------------------------------------------
% NEW SUB-SECTION
%----------------------------------------------------------------------------------------
\subsection{Language-model-based Use of Monolingual Data}
%----------------------------------------------------------------------------------------
% NEW SUBSUB-SECTION
%----------------------------------------------------------------------------------------
\subsubsection{1. Language Model Fusion}
\parinterval Monolingual data is a form of unannotated data, and the most common unsupervised task over it in natural language processing is language modeling, which describes the regularities of word sequences. An NMT model itself also acts as a language model: during decoding it considers not only the correspondence with the source sentence but also the fluency of the output. However, the training data of an NMT model is rather limited, whereas a language model can be trained on massive monolingual data and predicts more fluently. Fusing a language model with the NMT model when generating translations is therefore another effective way of using monolingual data.
\parinterval An NMT model generates translations autoregressively. For a bilingual sentence pair $(x, y)$, it predicts the distribution of the word at the current position given the source sentence $x$ and the previously generated words:
\begin{eqnarray}
\log \funp{P}(y | x; \theta) = \sum_{t}{\log \funp{P}(y_t | x, y_{<t}; \theta)}
\label{eq:16-1-xc}
\end{eqnarray}
\parinterval Here $\theta$ are the parameters of the NMT model and $y_{<t}$ denotes the words generated before position $t$. An NMT model accounts for both the adequacy and the fluency of its output: adequacy means the target-language translation preserves the meaning of the source sentence, and fluency means the translation conforms to natural language use. Adequacy requires modeling the correspondence between the source and target languages, whereas fluency concerns the target language only. A language model trained on massive monolingual data can thus generate more fluent sentences, and combining it with the decoding of the NMT model yields more fluent translations while maintaining adequacy. There are two concrete combination schemes, shallow fusion and deep fusion, as shown in Figure\ref{fig:16-6-xc}.
%----------------------------------------------
\begin{figure}[htp]
\centering
\includegraphics[scale=0.45]{./Chapter16/Figures/lm-fusion.png}
\caption{Shallow and deep fusion of a language model}
\label{fig:16-6-xc}
\end{figure}
%-------------------------------------------
\parinterval Shallow fusion weights the prediction probabilities of the NMT model and the language model to obtain the final prediction:
\begin{eqnarray}
\log \funp{P}(y_t | x, y_{<t}) = \log \funp{P}(y_t | x, y_{<t}; \theta_{TM}) + \beta \log \funp{P}(y_t | y_{<t}; \theta_{LM})
\label{eq:16-2-xc}
\end{eqnarray}
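\parinterval A single decoding step of shallow fusion can be sketched as below, assuming the translation model and the language model each provide log-probabilities over the same vocabulary; $\beta$ is the interpolation weight in Equation\ref{eq:16-2-xc}.

\begin{verbatim}
import numpy as np

def shallow_fusion_step(tm_logprobs, lm_logprobs, beta=0.3):
    # combine the per-word log-probabilities of the two models,
    # then pick the best next word
    fused = tm_logprobs + beta * lm_logprobs
    return int(np.argmax(fused))

tm = np.log(np.array([0.5, 0.3, 0.2]))  # P(y_t | x, y_<t; theta_TM)
lm = np.log(np.array([0.2, 0.7, 0.1]))  # P(y_t | y_<t;    theta_LM)
print(shallow_fusion_step(tm, lm))
\end{verbatim}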
\parinterval Deep fusion predicts with:
\begin{eqnarray}
\log \funp{P}(y_t | x, y_{<t}) = \log \funp{P}(y_t | x, y_{<t}; s_{t})
\label{eq:16-3-xc}
\end{eqnarray}
\parinterval The encoder-decoder framework naturally involves representation learning for the input (source language) and the output (target language). For example, the encoder must learn a distributed representation (Distributed Representation) of the information in the source sentence, covering both the words and the whole sequence. Larger-scale source-language monolingual data can therefore be used to train the encoder.

\parinterval One way to realize this idea is {\small\sffamily\bfnew{pre-training}}\index{Pre-training}. A common approach extracts part of the MT model (for example, the encoder), trains it on large-scale monolingual data with objectives such as language modeling, and then puts the optimized parameters back into the NMT model as initial values. Finally, the NMT model is {\small\sffamily\bfnew{fine-tuned}}\index{Fine-tuning} on the bilingual data to obtain the final translation model. Figure\ref{fig:16-7-xc} sketches the pre-training procedure for an MT encoder.
%----------------------------------------------
\begin{figure}[htp]
\centering
\input{./Chapter16/Figures/figure-encoder-fin}
\caption{The pre-training procedure for a machine translation encoder}
\label{fig:16-7-xc}
\end{figure}
%-------------------------------------------
\vspace{0.5em}
\item {\small\sffamily\bfnew{Word Embedding Pre-training}}

\parinterval Word embeddings can be seen as representation learning for individual words and play an important role in many NLP tasks\upcite{DBLP:journals/corr/abs-1901-09069}. The embedding methods introduced in {\chapternine} can therefore be used to train word embeddings on external monolingual data and feed them into the NMT system as its embedding input.

\parinterval Note that pre-trained embeddings can be used in NMT in two ways. One feeds the embeddings as fixed input: the embedding parameters are not adjusted while training the MT model, so the embedding module is fully decoupled and MT is modeled on top of fixed embedding inputs. The other still follows the ``pre-training + fine-tuning'' strategy, taking the embeddings as initial values of the translation model and updating them further during MT training. In recent years, fine-tuning on top of pre-trained embeddings has gained increasing favor among researchers.
\vspace{0.5em}
\item {\small\sffamily\bfnew{Encoder Pre-training}}

\parinterval The encoder's role in NMT is to abstract and extract the information in the source sentence, encoding the discrete word sequence into a set of context-dependent vector representations; essentially it is a source-side sentence representation model. Pre-trained sentence-level representation models (e.g., BERT and XLM) can thus be used to initialize the encoder parameters. In practice, however, such initialization brings little improvement on some rich-resource languages and can even hurt\upcite{DBLP:journals/corr/abs-2002-06823}. A likely reason is that although both the pre-trained model and the encoder represent sentences, their target tasks differ, so their parameter states still differ. Some work therefore fuses the pre-trained model with the translation model architecturally, treating the pre-trained sentence model as an independent module that provides sentence-level representations to the encoder or the decoder\upcite{DBLP:journals/corr/abs-2002-06823}.
\vspace{0.5em}
\item {\small\sffamily\bfnew{Sequence-to-sequence Pre-training}}
\centering
\input{./Chapter16/Figures/figure-mass}
\caption{The MASS pre-training method}
\label{fig:16-8-xc}
\end{figure}
%-------------------------------------------
\parinterval Taking MASS\upcite{song2019mass} as an example, the entire encoder-decoder structure can be pre-trained directly. Training uses masking: a segment of the source word sequence is replaced with the special token <mask>, and the decoder predicts this unknown segment, as shown in Figure\ref{fig:16-8-xc}, where \# denotes <mask>. This makes the encoder capture contextual information while forcing the decoder to rely on the encoder, pre-training the attention between encoder and decoder; predicting the segment on the decoder side also lets the decoder learn context representations with forward dependencies.
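\parinterval The sketch below builds a MASS-style training instance by masking one contiguous span of the source sequence, which the decoder is then trained to predict; the mask ratio and the tokenization are simplified assumptions.

\begin{verbatim}
import random

def mass_mask(tokens, ratio=0.5, rng=random.Random(0)):
    # mask one contiguous span; the decoder must reconstruct it
    n = max(1, int(len(tokens) * ratio))
    start = rng.randrange(len(tokens) - n + 1)
    enc_input = tokens[:start] + ["<mask>"] * n + tokens[start + n:]
    return enc_input, tokens[start:start + n], start

tokens = ["we", "study", "machine", "translation", "methods"]
print(mass_mask(tokens))
\end{verbatim}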
%----------------------------------------------------------------------------------------
% NEW SUBSUB-SECTION
%----------------------------------------------------------------------------------------
\subsubsection{3. Joint Training}
\parinterval {\small\sffamily\bfnew{Multitask learning}}\index{Multitask Learning} is a subfield of machine learning in which multiple independent but related tasks are learned simultaneously\upcite{DBLP:journals/corr/Ruder17a}. Through parameter sharing it learns several models, each corresponding to a different task, so that the models can ``promote'' one another. In NMT, to make use of monolingual data, translation can serve as the main task while auxiliary tasks that use only monolingual data are added to capture the linguistic knowledge in that data\upcite{DBLP:conf/emnlp/DomhanH17}.
\parinterval A language model is the most direct way to use target-side monolingual data, but the translation model, being a constrained language model, also depends on the source and cannot directly join multitask learning. To address this, the translation model structure is modified by adding a language-model sub-layer to the decoder and assigning it the language modeling task (Figure\ref{fig:16-9-xc}). During training, bilingual data and monolingual data are fed into the translation model and the language model respectively, and the two losses are summed for computing gradients and updating the whole model; the language model's parameters are part of the translation model.
%----------------------------------------------
\begin{figure}[htp]
\centering
\input{./Chapter16/Figures/figure-target-side-multi-task-learning}
\caption{Single-task and multitask learning in machine translation}
\label{fig:16-9-xc}
\end{figure}
%-------------------------------------------
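\parinterval The multitask objective can be sketched as the sum of the two losses computed on a bilingual batch and a monolingual batch; the loss functions below are toy stand-ins, while in a real system the two losses share the decoder parameters through the language-model sub-layer.

\begin{verbatim}
def joint_step(mt_loss_fn, lm_loss_fn, bitext_batch, mono_batch):
    # translation loss on bilingual data plus language-model loss
    # on monolingual data; one backward pass updates both tasks
    return mt_loss_fn(*bitext_batch) + lm_loss_fn(mono_batch)

mt_loss = lambda src, tgt: 1.5  # stand-in translation loss
lm_loss = lambda tgt: 0.7       # stand-in language-model loss
print(joint_step(mt_loss, lm_loss, (["s"], ["t"]), ["t_mono"]))
\end{verbatim}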
\parinterval So far, NMT systems have been trained for only one direction at a time. For example, given Chinese-English bilingual data, a single run trains either a Chinese-to-English or an English-to-Chinese system. Since both directions use the same bilingual data, could one training run yield translation systems for both directions at once?

\parinterval Recall how NMT is modeled: given a mutually translated sentence pair $(\mathbi{s},\mathbi{t})$, translating a source sentence $\mathbi{s}$ into a target sentence $\mathbi{t}$ is formulated as computing the conditional probability $\funp{P}(\mathbi{t}|\mathbi{s})$; likewise, translating $\mathbi{t}$ into $\mathbi{s}$ corresponds to $\funp{P}(\mathbi{s}|\mathbi{t})$. Usually, NMT training produces only one direction at a time, i.e., $\funp{P}(\mathbi{t}|\mathbi{s})$ or $\funp{P}(\mathbi{s}|\mathbi{t})$, implying that $\funp{P}(\mathbi{t}|\mathbi{s})$ and $\funp{P}(\mathbi{s}|\mathbi{t})$ are mutually independent. But are they really unrelated? Suppose, for example, that $\mathbi{s}$ and $\mathbi{t}$ are vectors of the same size and the transformation from $\mathbi{s}$ to $\mathbi{t}$ is linear, i.e., a multiplication with a square matrix $\mathbi{W}$:
\begin{eqnarray}
\mathbi{t} = \mathbi{s} \cdot \mathbi{W}
\label{eq:16-6-xc}
\end{eqnarray}
\parinterval Here $\mathbi{s}$ and $\mathbi{t}$ can both be viewed as distributed vector representations, and $\mathbi{W}$ should be full-rank: otherwise, for any $\mathbi{s}$, the $\mathbi{t}$ obtained through $\mathbi{W}$ would fall only into a subspace of all possible $\mathbi{t}$, i.e., under a given $\mathbi{W}$ some $\mathbi{t}$ could not be expressed by any $\mathbi{s}$, which contradicts common sense, since whatever the sentence, a translation of it can always be found. If $\mathbi{W}$ is full-rank it is invertible, so given that $\mathbi{W}$ maps $\mathbi{s}$ to $\mathbi{t}$, the map from $\mathbi{t}$ back to $\mathbi{s}$ must be the inverse of $\mathbi{W}$ and no other matrix. This example suggests that $\funp{P}(\mathbi{t}|\mathbi{s})$ and $\funp{P}(\mathbi{s}|\mathbi{t})$ should intuitively be related. Whether a simple linear relation between $\mathbi{s}$ and $\mathbi{t}$ actually exists remains an open question, but the example offers a way of thinking about converting source and target sentences into each other. Indeed, researchers have used mathematical techniques to connect $\funp{P}(\mathbi{t}|\mathbi{s})$ and $\funp{P}(\mathbi{s}|\mathbi{t})$ in the training objective, so that one training run yields translation models for both directions, making training more efficient\upcite{Hassan2018AchievingHP}.
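\parinterval The linear-map intuition can be checked numerically with the short sketch below: mapping $\mathbi{s}$ to $\mathbi{t}$ with a full-rank $\mathbi{W}$ and back with $\mathbi{W}^{-1}$ recovers $\mathbi{s}$.

\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
W = rng.normal(size=(4, 4))    # full-rank with probability one
s = rng.normal(size=4)         # "source" vector
t = s @ W                      # forward mapping s -> t
s_back = t @ np.linalg.inv(W)  # the reverse mapping must be W^{-1}
print(np.allclose(s, s_back))  # True
\end{verbatim}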
%----------------------------------------------------------------------------------------
% NEW SUBSUB-SECTION
\subsubsection{2. Supervised Dual Learning}
\parinterval Besides modeling translation with the conditional probability $\funp{P}(\mathbi{t}|\mathbi{s})$, the joint distribution $\funp{P}(\mathbi{s},\mathbi{t})$ can also be modeled\upcite{DBLP:conf/icml/XiaQCBYL17}. By the definition of conditional probability:
\begin{eqnarray}
\funp{P}(\mathbi{s},\mathbi{t}) &=& \funp{P}(\mathbi{s})\funp{P}(\mathbi{t}|\mathbi{s}) \nonumber \\
&=& \funp{P}(\mathbi{t})\funp{P}(\mathbi{s}|\mathbi{t})
\label{eq:16-7-xc}
\end{eqnarray}
\parinterval Equation\ref{eq:16-7-xc} naturally connects the two translation models $\funp{P}(\mathbi{t}|\mathbi{s})$ and $\funp{P}(\mathbi{s}|\mathbi{t})$ with the two language models $\funp{P}(\mathbi{s})$ and $\funp{P}(\mathbi{t})$: $\funp{P}(\mathbi{s})\funp{P}(\mathbi{t}|\mathbi{s})$ should be close to $\funp{P}(\mathbi{t})\funp{P}(\mathbi{s}|\mathbi{t})$, since both express the same joint distribution $\funp{P}(\mathbi{s},\mathbi{t})$. Therefore, when constructing the training objectives of the two translation models, besides the maximum-likelihood objectives each uses when trained alone, an extra term can be added to encourage the two models to satisfy Equation\ref{eq:16-8-xc}:
\begin{eqnarray}
\mathcal{L} = (\log \funp{P}(\mathbi{s}) + \log \funp{P}(\mathbi{t}|\mathbi{s}) - \log \funp{P}(\mathbi{t}) - \log \funp{P}(\mathbi{s}|\mathbi{t}))^{2}
\label{eq:16-8-xc}
\end{eqnarray}
\parinterval Here the two language models $\funp{P}(\mathbi{s})$ and $\funp{P}(\mathbi{t})$ are pre-trained and do not participate in training the translation models. Notice that for each single model, the objective now contains a term related to the model of the other direction. This form closely resembles L1/L2 regularization (see Section\ref{subsection-7.3.1}), so the method can be viewed as a task-specific regularizer inspired by the nature of the translation task itself. Because the two translation models and the language models influence one another, this method yields better models than training each direction alone.
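\parinterval The regularization term of Equation\ref{eq:16-8-xc} is straightforward to compute from the four log-probabilities, as the sketch below shows with made-up values; during training it is added to the likelihood losses of both directions.

\begin{verbatim}
def dual_reg(log_ps, log_pt_s, log_pt, log_ps_t):
    # (log P(s) + log P(t|s) - log P(t) - log P(s|t))^2
    return (log_ps + log_pt_s - log_pt - log_ps_t) ** 2

# toy log-probabilities from the two fixed language models
# and the two translation models
print(dual_reg(-10.0, -5.0, -9.0, -6.2))
\end{verbatim}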
%----------------------------------------------------------------------------------------
% NEW SUBSUB-SECTION
\subsubsection{3. Unsupervised Dual Learning}
\parinterval Building on the supervised dual-learning model of the joint distribution $\funp{P}(\mathbi{s},\mathbi{t})$, if $\mathbi{t}$ is treated as a latent variable, the marginal distribution $\funp{P}(\mathbi{s})$, i.e., the language model of $\mathbi{s}$, is obtained:
\begin{eqnarray}
\funp{P}(\mathbi{s}) &=& \sum_{\mathbi{t}}\funp{P}(\mathbi{s},\mathbi{t}) \nonumber \\
&=& \sum_{\mathbi{t}}\funp{P}(\mathbi{s}|\mathbi{t})\funp{P}(\mathbi{t}|\mathbi{s})
\label{eq:16-9-xc}
\end{eqnarray}
\noindent Equation\ref{eq:16-9-xc} assumes $\funp{P}(\mathbi{s}|\mathbi{t})=\funp{P}(\mathbi{s}|\mathbi{s},\mathbi{t})$. This assumption clearly holds, since when the translation of a sentence is known, the original is not needed to translate it back. Directly optimizing (maximizing) the right-hand side of Equation\ref{eq:16-9-xc} amounts to imposing a {\small\sffamily\bfnew{cycle consistency}}\index{Circle Consistency} constraint on $\funp{P}(\mathbi{s}|\mathbi{t})$ and $\funp{P}(\mathbi{t}|\mathbi{s})$\upcite{DBLP:conf/iccv/ZhuPIE17}: for a sentence $\mathbi{s}$, after it is translated into $\mathbi{t}$ via $\funp{P}(\mathbi{t}|\mathbi{s})$, it should be possible to translate it back into $\mathbi{s}$ via $\funp{P}(\mathbi{s}|\mathbi{t})$, as shown in Figure\ref{fig:16-10-xc}. Equation\ref{eq:16-9-xc} thus gives a single objective that optimizes $\funp{P}(\mathbi{s}|\mathbi{t})$ and $\funp{P}(\mathbi{t}|\mathbi{s})$ simultaneously. An additional advantage of this objective is that it essentially learns a language model $\funp{P}(\mathbi{s})$ composed of $\funp{P}(\mathbi{s}|\mathbi{t})$ and $\funp{P}(\mathbi{t}|\mathbi{s})$, and learning $\funp{P}(\mathbi{s})$ relies on monolingual data, which means this objective can very naturally use large amounts of monolingual data to train both translation models at once. The same conclusion extends to $\funp{P}(\mathbi{t})$\upcite{DBLP:conf/nips/HeXQWYLM16}.
%----------------------------------------------
\begin{figure}[htp]
\centering
\input{./Chapter16/Figures/figure-cycle-consistency}
\caption{Cycle consistency}
\label{fig:16-10-xc}
\end{figure}
%----------------------------------------------
\begin{itemize}
\vspace{0.5em}
\item Computing Equation\ref{eq:16-9-xc} requires enumerating all possible values of the latent variable $\mathbi{t}$, i.e., all target sentences that could be produced, which is impossible; in practice, the true objective is approximated by averaging the losses of several randomly sampled $\mathbi{t}$, as in the sketch after this list;
\vspace{0.5em}
\item As Equation\ref{eq:16-9-xc} shows, after the objective is computed on $\funp{P}(\mathbi{s})$, the gradient first flows to $\funp{P}(\mathbi{s}|\mathbi{t})$ and then through it to $\funp{P}(\mathbi{t}|\mathbi{s})$. Since the input $\mathbi{t}$ of $\funp{P}(\mathbi{s}|\mathbi{t})$ is sampled from $\funp{P}(\mathbi{t}|\mathbi{s})$ and sampling is not differentiable, gradient propagation breaks at the output of $\funp{P}(\mathbi{t}|\mathbi{s})$, which therefore receives no gradient for its update. A common remedy is the policy gradient\upcite{DBLP:conf/nips/SuttonMSM99}: the $\mathbi{t}$ sampled from $\funp{P}(\mathbi{t}|\mathbi{s})$ is treated as a target for $\funp{P}(\mathbi{t}|\mathbi{s})$ to learn, and its loss is weighted by $\log \funp{P}(\mathbi{s}|\mathbi{t})$. However, because only a few samples approximate the true objective, the policy gradient has very high variance and the system cannot learn stably, especially early in training; the two translation models are therefore usually pre-trained on bilingual data first, and Equation\ref{eq:16-9-xc} is then used as a regularization term during normal training.
\vspace{0.5em}
\end{itemize}
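\parinterval The sampling approximation mentioned in the first item can be sketched as follows, with toy stand-ins for the two translation models; it estimates $\funp{P}(\mathbi{s})$, the expectation of $\funp{P}(\mathbi{s}|\mathbi{t})$ under $\funp{P}(\mathbi{t}|\mathbi{s})$, by averaging over a few sampled $\mathbi{t}$.

\begin{verbatim}
import math

def marginal_ll(s, sample_t, log_p_s_given_t, n_samples=8):
    # Monte Carlo estimate of log P(s) = log E_{t~P(t|s)}[P(s|t)]
    vals = [math.exp(log_p_s_given_t(s, sample_t(s)))
            for _ in range(n_samples)]
    return math.log(sum(vals) / n_samples)

# toy stand-ins for sampling from P(t|s) and scoring with P(s|t)
sample_t = lambda s: s[::-1]
log_p_s_given_t = lambda s, t: -0.5 * abs(len(s) - len(t))
print(marginal_ll("满意", sample_t, log_p_s_given_t))
\end{verbatim}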
\subsubsection{4. On-the-fly Back-translation}
\parinterval Revisiting the objective of Equation\ref{eq:16-9-xc}, unsupervised dual learning and back-translation (assume, for now, back-translation on a single pair $(\mathbi{s},\mathbi{t})$) are deeply related: given a sentence $\mathbi{s}$, both first translate it into $\mathbi{t}$ with $\funp{P}(\mathbi{t}|\mathbi{s})$; unsupervised dual learning then maximizes $\funp{P}(\mathbi{s}|\mathbi{t})\funp{P}(\mathbi{t}|\mathbi{s})$, whereas back-translation maximizes $\funp{P}(\mathbi{s}|\mathbi{t})$. When unsupervised dual learning assumes $\funp{P}(\mathbi{t}|\mathbi{s})$ to be a perfect translation model, the two are equivalent. Moreover, when the parameters $\theta$ of the two directions are shared, the gradient of unsupervised dual learning is $\frac{\partial \funp{P}(\mathbi{s})}{\partial \theta} =\funp{P}(\mathbi{t}|\mathbi{s}) \frac{\partial \funp{P}(\mathbi{s}|\mathbi{t})}{\partial \theta}+\funp{P}(\mathbi{s}|\mathbi{t}) \frac{\partial \funp{P}(\mathbi{t}|\mathbi{s})}{\partial \theta}$, while that of back-translation is $\frac{\partial \funp{P}(\mathbi{s}|\mathbi{t})}{\partial \theta}$. From this perspective, both optimize the language-model objective $\funp{P}(\mathbi{s})$; back-translation merely uses a gradient estimate that is biased with respect to $\theta$.
\parinterval This fact suggests that a suitably augmented back-translation should achieve results similar to unsupervised dual learning. {\small\sffamily\bfnew{On-the-fly back-translation}}\index{On-the-fly Back-translation} is such an example. Ordinary back-translation first translates every $\mathbi{s}$ in the dataset and then trains only $\funp{P}(\mathbi{s}|\mathbi{t})$. In contrast, after sampling an $\mathbi{s}$ from the dataset, on-the-fly back-translation immediately translates it into $\mathbi{t}$ and trains $\funp{P}(\mathbi{s}|\mathbi{t})$, then in the next iteration samples a $\mathbi{t}$ and trains $\funp{P}(\mathbi{t}|\mathbi{s})$, alternately updating $\funp{P}(\mathbi{s}|\mathbi{t})$ and $\funp{P}(\mathbi{t}|\mathbi{s})$. Although on-the-fly back-translation cannot, like unsupervised dual learning, pass information from $\funp{P}(\mathbi{s}|\mathbi{t})$ to $\funp{P}(\mathbi{t}|\mathbi{s})$ through gradients within a single sample, its alternating updates let $\funp{P}(\mathbi{s}|\mathbi{t})$ pass information to $\funp{P}(\mathbi{t}|\mathbi{s})$ across samples through the outputs $\mathbi{s}$ it produces, achieving similar results while being very efficient to implement and compute. On-the-fly back-translation has been widely used for training unsupervised NMT systems\upcite{lample2019cross}.
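\parinterval The alternating schedule of on-the-fly back-translation can be sketched as below; the two models and the update step are hypothetical stand-in callables, and each freshly sampled sentence is translated immediately and used to update the opposite direction.

\begin{verbatim}
def on_the_fly_bt(model_st, model_ts, mono_s, mono_t, train_step):
    # alternate: synthesise a pair with one direction,
    # then update the other direction on it
    for s, t in zip(mono_s, mono_t):
        train_step(model_ts, src=model_st(s), tgt=s)  # update P(s|t)
        train_step(model_st, src=model_ts(t), tgt=t)  # update P(t|s)

# toy stand-ins for the two models and for one training update
model_st = lambda s: "t(" + s + ")"
model_ts = lambda t: "s(" + t + ")"
step = lambda model, src, tgt: print("update on", (src, tgt))
on_the_fly_bt(model_st, model_ts, ["s1"], ["t1"], step)
\end{verbatim}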
\subsubsection{triangular}
[Triangular Architecture for Rare Language Translation]
(Binghao covered this before; Binghao could write this part)
[Semi-Supervised Learning for Neural Machine Translation]
%----------------------------------------------------------------------------------------
% NEW SECTION
%%%------------------------------------------------------------------------------------------------------------
\begin{tikzpicture}
\begin{scope}
\node [anchor=west] (w0) at (0,0) {\small{${\mathbi{o}}_{i-3}$}};
\node [anchor=west] (w1) at ([xshift=2.8em]w0.east) {\small{${\mathbi{o}}_{i-2}$}};
\node [anchor=west] (w2) at ([xshift=2.8em]w1.east) {\small{${\mathbi{o}}_{i-1}$}};
\node [anchor=north] (index0) at ([yshift=0.5em]w0.south) {\footnotesize(index)};
\node [anchor=north] (index1) at ([yshift=0.5em]w1.south) {\footnotesize(index)};
\node [anchor=north] (index2) at ([yshift=0.5em]w2.south) {\footnotesize(index)};
\node [anchor=south,draw,inner sep=3pt,fill=blue!20!white] (e0) at ([yshift=1em]w0.north) {\footnotesize{${\mathbi{e}}_1={\mathbi{o}}_{i-3} {\mathbi{C}}$}};
\node [anchor=south,draw,inner sep=3pt,fill=blue!20!white] (e1) at ([yshift=1em]w1.north) {\footnotesize{${\mathbi{e}}_2={\mathbi{o}}_{i-2} {\mathbi{C}}$}};
\node [anchor=south,draw,inner sep=3pt,fill=blue!20!white] (e2) at ([yshift=1em]w2.north) {\footnotesize{${\mathbi{e}}_3={\mathbi{o}}_{i-1} {\mathbi{C}}$}};
\node [anchor=south,draw,minimum width=11em,inner sep=3pt,fill=orange!20!white] (h0) at ([yshift=1.5em]e1.north) {\footnotesize{${\mathbi{h}}_0=\textrm{Tanh}([{\mathbi{e}}_1,{\mathbi{e}}_2,{\mathbi{e}}_3]{\mathbi{H}} + {\mathbi{d}})$}};
\node [anchor=south,draw,minimum width=9em,inner sep=3pt,fill=orange!20!white] (h1) at ([yshift=1.5em]h0.north) {\footnotesize{${\mathbi{y}}=\textrm{Softmax}({\mathbi{h}}_0 {\mathbi{U}})$}};
\node [anchor=south] (ylabel) at ([yshift=1em]h1.north) {\small{$\funp{P}(w_i|w_{i-3}w_{i-2}w_{i-1})$}};
\draw [->,line width=1pt] ([yshift=0.1em]w0.north) -- ([yshift=-0.1em]e0.south);
\draw[->,thick] (-6,0) -- (5,0);
\draw[->,thick] (-5,-4) -- (-5,5);
\draw [<-] (-2.5,4) -- (-2,5) node [pos=1,right,inner sep=2pt] {\footnotesize{gold answer $\tilde{\mathbi{y}}_i$}};
{
\draw [<-] (-3,-3) -- (-2.5,-2) node [pos=0,left,inner sep=2pt] {\footnotesize{prediction ${\mathbi{y}}_i$}};}
{
\draw [<-] (2.3,1) -- (3.3,2) node [pos=1,right,inner sep=2pt] {\footnotesize{deviation $|\tilde{\mathbi{y}}_i - {\mathbi{y}}_i|$}};
\foreach \x in {-3.8,-3.7,...,3.0}{
\pgfmathsetmacro{\p}{- 1/14 * (\x + 4) * (\x + 1) * (\x - 1) * (\x - 3)};
\pgfmathsetmacro{\q}{- 1/14 * (4*\x*\x*\x + 3*\x*\x - 26*\x - 1)};
%%%------------------------------------------------------------------------------------------------------------
\begin{tikzpicture}
\begin{scope}
\node [anchor=center,draw,fill=red!20,minimum height=1.8em,minimum width=2.5em] (h) at (0,0) {${\mathbi{h}}^{k-1}$};
\node [anchor=west,draw,fill=blue!20,minimum height=1.8em,minimum width=2.5em] (s) at ([xshift=6em]h.east) {${\mathbi{s}}^{k}$};
\node [anchor=west,draw,fill=green!20,minimum height=1.8em,minimum width=2.5em] (h2) at ([xshift=6em]s.east) {${\mathbi{h}}^{k}$};
\node [anchor=east] (prev) at ([xshift=-2em]h.west) {...};
\node [anchor=west] (next) at ([xshift=2em]h2.east) {...};
\draw [->,thick] ([xshift=0.1em]prev.east) -- ([xshift=-0.1em]h.west);
\draw [->,thick] ([xshift=0.1em]h.east) -- ([xshift=-0.1em]s.west) node [pos=0.5,below] {\scriptsize{${\mathbi{s}}^k = {\mathbi{h}}^{k-1}{\mathbi{W}}^k$}};
\draw [->,thick] ([xshift=0.1em]s.east) -- ([xshift=-0.1em]h2.west) node [pos=0.5,below] {\scriptsize{${\mathbi{h}}^k = f^k({\mathbi{s}}^{k})$}};
\draw [->,thick] ([xshift=0.1em]h2.east) -- ([xshift=-0.1em]next.west);
{
}
{
\node [anchor=south] (h2label) at (h2.north) {$\frac{\partial L}{\partial {\mathbi{h}}^{k}}$};
}
{
\node [anchor=south] (slabel) at (s.north) {$\frac{\partial L}{\partial {\mathbi{s}}^{k}}$};
}
{
\node [anchor=south] (hlabel) at (h.north) {$\frac{\partial L}{\partial {\mathbi{h}}^{k-1}}$, $\frac{\partial L}{\partial {\mathbi{W}}^{k}}$};
}
\end{scope}
%%%------------------------------------------------------------------------------------------------------------
\begin{tikzpicture}
\begin{scope}
\node [anchor=west,minimum height=1.7em,fill=blue!20,draw] (s) at (0,0) {$\mathbi{s}^{K}$};
\node [anchor=west,minimum height=1.7em,fill=green!20,draw] (h2) at ([xshift=5.5em]s.east) {$\mathbi{h}^{K}$};
\node [anchor=west,minimum height=1.7em,fill=orange!20,draw] (l) at ([xshift=5.5em]h2.east) {$L$};
\draw [->] (s.east) -- (h2.west);
\draw [->] (h2.east) -- (l.west);
\draw [->,very thick,red] ([yshift=1em,xshift=-0.1em]l.north) -- ([yshift=1em,xshift=0.1em]h2.north) node [pos=0.5,above] {\scriptsize{compute gradient {$\frac{\partial L}{\partial {\mathbi{h}}^K} = ?$}}};
\draw [->,very thick,red] ([yshift=1em,xshift=-0.1em]h2.north) -- ([yshift=1em,xshift=0.1em]s.north) node [pos=0.5,above] {\scriptsize{compute gradient {$\frac{\partial f^K({\mathbi{s}}^K)}{\partial {\mathbi{s}}^K} = ?$}}};
\draw [-,very thick,red] ([yshift=0.5em]l.north) -- ([yshift=1.5em]l.north);
\draw [-,very thick,red] ([yshift=0.5em]h2.north) -- ([yshift=1.5em]h2.north);
\draw [-,very thick,red] ([yshift=0.5em]s.north) -- ([yshift=1.5em]s.north);
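% The two question marks combine through the chain rule. For an elementwise activation $f^K$, one natural reading of the figure is
%
% \begin{displaymath}
% {\bm \pi}^K \;=\; \frac{\partial L}{\partial {\mathbi{s}}^K} \;=\; \frac{\partial L}{\partial {\mathbi{h}}^K} \odot \frac{\partial f^K({\mathbi{s}}^K)}{\partial {\mathbi{s}}^K}
% \end{displaymath}
%
% where $\odot$ denotes elementwise multiplication; the elementwise assumption is made here only for concreteness.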
......
%%%------------------------------------------------------------------------------------------------------------
\begin{tikzpicture}
\begin{scope}
\node [anchor=center,minimum height=1.7em,fill=yellow!20,draw] (h) at (0,0) {${\bm h}^{K-1}$};
\node [anchor=west,minimum height=1.7em,fill=blue!20,draw] (s) at ([xshift=6.0em]h.east) {${\bm s}^{K}$};
\node [anchor=center,minimum height=1.7em,fill=yellow!20,draw] (h) at (0,0) {${\mathbi{h}}^{K-1}$};
\node [anchor=west,minimum height=1.7em,fill=blue!20,draw] (s) at ([xshift=6.0em]h.east) {${\mathbi{s}}^{K}$};
\draw [->] (h.east) -- (s.west);
\node [anchor=south west,inner sep=2pt] (step100) at ([xshift=0.5em,yshift=-0.8em]h.north east) {\scriptsize{${\bm s}^K = {\bm h}^{K-1} {\bm W}^K$}};
\node [anchor=south west,inner sep=2pt] (step100) at ([xshift=0.5em,yshift=-0.8em]h.north east) {\scriptsize{${\mathbi{s}}^K = {\mathbi{h}}^{K-1} {\mathbi{W}}^K$}};
\node [anchor=south west] (slabel) at ([yshift=1em,xshift=0.3em]s.north) {\scriptsize{\textbf{{已经得到:${\bm \pi}^K = \frac{\partial L}{\partial {\bm s}^K}$}}}};
\node [anchor=south west] (slabel) at ([yshift=1em,xshift=0.3em]s.north) {\scriptsize{\textbf{{已经得到:${\bm \pi}^K = \frac{\partial L}{\partial {\mathbi{s}}^K}$}}}};
\draw [->] ([yshift=0.3em]slabel.south) .. controls +(south:0.5) and +(north:0.5) .. ([xshift=0.5em,yshift=0.1em]s.north);
{
\draw [->,very thick,red] ([yshift=1em,xshift=-0.1em]s.north) -- ([yshift=1.0em,xshift=0.1em]h.north) node [pos=0.5,above] {\scriptsize{{$\frac{\partial L}{\partial {\bm W}^K} = ?$, $\frac{\partial L}{\partial {\bm h}^{K-1}} = ?$}}};
\draw [->,very thick,red] ([yshift=1em,xshift=-0.1em]s.north) -- ([yshift=1.0em,xshift=0.1em]h.north) node [pos=0.5,above] {\scriptsize{{$\frac{\partial L}{\partial {\mathbi{W}}^K} = ?$, $\frac{\partial L}{\partial {\mathbi{h}}^{K-1}} = ?$}}};
\draw [-,very thick,red] ([yshift=0.5em]h.north) -- ([yshift=1.5em]h.north);
\draw [-,very thick,red] ([yshift=0.5em]s.north) -- ([yshift=1.5em]s.north);
}
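% Given ${\bm \pi}^K = \frac{\partial L}{\partial {\mathbi{s}}^K}$ and ${\mathbi{s}}^K = {\mathbi{h}}^{K-1} {\mathbi{W}}^K$, the two unknowns in the figure follow directly (assuming the row-vector convention these figures use):
%
% \begin{displaymath}
% \frac{\partial L}{\partial {\mathbi{W}}^K} = \big({\mathbi{h}}^{K-1}\big)^{\textrm{T}} {\bm \pi}^K, \qquad
% \frac{\partial L}{\partial {\mathbi{h}}^{K-1}} = {\bm \pi}^K \big({\mathbi{W}}^K\big)^{\textrm{T}}
% \end{displaymath}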
......
......@@ -8,7 +8,7 @@
\node [fill=orange!20,inner sep=0pt,minimum height=0.49cm,minimum width=0.49cm] at (\x,\y) {$\number\value{mycount1}$};
\addtocounter{mycount1}{1};
}
\node [anchor=south] (varlabel) at (0,0.6) {$\vectorn{\emph{s}}$};
\node [anchor=south] (varlabel) at (0,0.6) {$\mathbi{s}$};
\node [anchor=north] (labelc) at (0,-0.7) {\footnotesize{(a)}};
\end{scope}
......@@ -20,7 +20,7 @@
\node [fill=green!20,inner sep=0pt,minimum height=0.48cm,minimum width=0.48cm] at (\x,\y) {$1$};
\addtocounter{mycount1}{1};
}
\node [anchor=south] (varlabel) at (0,0.1) {$\vectorn{\emph{b}}$};
\node [anchor=south] (varlabel) at (0,0.1) {$\mathbi{b}$};
\node [anchor=north] (labelc) at (0,-0.7) {\footnotesize{(b)}};
\end{scope}
......@@ -34,7 +34,7 @@
\node [fill=orange!20,inner sep=0pt,minimum height=0.49cm,minimum width=0.49cm] at (\x,\y) {$\number\value{mycount1}$};
\addtocounter{mycount1}{1};
}
\node [anchor=south] (varlabel) at (0,0.6) {$\vectorn{\emph{s}}$};
\node [anchor=south] (varlabel) at (0,0.6) {$\mathbi{s}$};
\end{scope}
\begin{scope}[yshift=-1in,xshift=1.5in]
\setcounter{mycount1}{1}
......@@ -50,7 +50,7 @@
\addtocounter{mycount1}{1};
}
\node [anchor=center] (plabel) at (-4.5em,0) {\huge{$\mathbf{+}$}};
\node [anchor=south] (varlabel) at (0,0.6) {$\vectorn{\emph{b}}$};
\node [anchor=south] (varlabel) at (0,0.6) {$\mathbi{b}$};
\node [anchor=north] (labelc) at (0,-0.7) {\footnotesize{(c)}};
\end{scope}
\begin{scope}[yshift=-1in,xshift=3in]
......@@ -62,7 +62,7 @@
\addtocounter{mycount1}{1};
}
\node [anchor=center] (plabel) at (-4.5em,0) {\huge{$\mathbf{=}$}};
\node [anchor=south] (varlabel) at (0,0.6) {${\vectorn{\emph{s}}}+\vectorn{\emph{b}}$};
\node [anchor=south] (varlabel) at (0,0.6) {$\mathbi{s}+\mathbi{b}$};
\end{scope}
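% Panels (a)-(c) depict nothing more than elementwise tensor addition: the all-ones ${\mathbi{b}}$ is laid over ${\mathbi{s}}$ cell by cell. A short NumPy sketch, with concrete values assumed purely for illustration:

import numpy as np

s = np.arange(12).reshape(3, 4)   # a 3 x 4 tensor s, as in panel (a)
b = np.ones(4)                    # a vector of ones, as in panel (b)
print(s + b)                      # b is broadcast over the rows of s, as in panel (c)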
......
%%%------------------------------------------------------------------------------------------------------------
\begin{tikzpicture}
{
\begin{scope}
\node [] (part1label) at (0,0) {\includegraphics[scale=0.22]{./Chapter9/Figures/figure-feature-engineering.jpg}};
\node [anchor=north,minimum width=2.0em,minimum height=1.5em] (label11) at ([xshift=0.3em,yshift=2em]part1label.north) {\large{特征工程+机器学习}};
\node [anchor=north,minimum width=2.0em,minimum height=1.5em] (label12) at ([xshift=-11em,yshift=-6.3em]part1label.north) {\normalsize{输入}};
\node [anchor=north,minimum width=2.0em,minimum height=1.5em] (label13) at ([xshift=-1em,yshift=-6.3em]part1label.north) {\normalsize{特征提取}};
\node [anchor=north,minimum width=2.0em,minimum height=1.5em] (label14) at ([xshift=6.9em,yshift=-6.3em]part1label.north) {\normalsize{分类}};
\node [anchor=north,minimum width=2.0em,minimum height=1.5em] (label15) at ([xshift=14.2em,yshift=-6.3em]part1label.north) {\normalsize{输出}};
\end{scope}
}
{
\begin{scope}[yshift=-1.5in]
\node [] (part1label2) at (0,0) {\includegraphics[scale=0.22]{./Chapter9/Figures/figure-deep-learning.jpg}};
\node [anchor=north,minimum width=2.0em,minimum height=1.5em] (label21) at ([xshift=0.2em,yshift=1.2em]part1label2.north) {\large{深度学习(端到端学习)}};
\node [anchor=north,minimum width=2.0em,minimum height=1.5em] (label22) at ([xshift=-11em,yshift=-6em]part1label2.north) {\normalsize{输入}};
\node [anchor=north,minimum width=2.0em,minimum height=1.5em] (label23) at ([xshift=3.0em,yshift=-6em]part1label2.north) {\normalsize{特征提取+分类}};
\node [anchor=north,minimum width=2.0em,minimum height=1.5em] (label24) at ([xshift=14.2em,yshift=-6em]part1label2.north) {\normalsize{输出}};
\end{scope}
}
\end{tikzpicture}
%%%------------------------------------------------------------------------------------------------------------
......@@ -10,7 +10,7 @@
\node [anchor=east] (x0) at ([xshift=-6em]neuron00.west) {$x_1$};
\node [anchor=east] (x1) at ([xshift=-6em]neuron01.west) {$x_2$};
\node [anchor=east] (x2) at ([xshift=-6em]neuron02.west) {${\vectorn{\emph{b}}}$};
\node [anchor=east] (x2) at ([xshift=-6em]neuron02.west) {$\mathbi{b}$};
\node [anchor=west] (y0) at ([xshift=4em]neuron00.east) {$y_1$\scriptsize{温度}};
......
% Image diff: Chapter9/Figures/figure-deep-learning.jpg (130 KB -> 92.1 KB)
%%%------------------------------------------------------------------------------------------------------------
\begin{tikzpicture}
\begin{scope}
\node [anchor=center,inner sep=2pt] (e) at (0,0) {\small{$\vectorn{\emph{e}}=\vectorn{\emph{o}}$}};
\node [anchor=west,inner sep=2pt] (c) at (e.east) {\small{$\vectorn{\emph{C}}$}};
\node [anchor=center,inner sep=2pt] (e) at (0,0) {\small{$\mathbi{e}=\mathbi{o}$}};
\node [anchor=west,inner sep=2pt] (c) at (e.east) {\small{$\mathbi{C}$}};
\begin{pgfonlayer}{background}
\node [rectangle,inner sep=0.4em,draw,fill=blue!20!white] [fit = (e) (c)] (box) {};
\end{pgfonlayer}
\draw [->,thick] ([yshift=-1em]box.south)--([yshift=-0.1em]box.south) node [pos=0,below] (bottom1) {\small{单词$w$的one-hot表示}};
\draw [->,thick] ([yshift=0.1em]box.north)--([yshift=1em]box.north) node [pos=1,above] (top1) {\scriptsize{$\vectorn{\emph{e}}$=(8,.2,-1,.9,...,1)}};
\node [anchor=north] (bottom2) at ([yshift=0.3em]bottom1.south) {\scriptsize{$\vectorn{\emph{o}}$=(0,0,1,0,...,0)}};
\draw [->,thick] ([yshift=0.1em]box.north)--([yshift=1em]box.north) node [pos=1,above] (top1) {\scriptsize{$\mathbi{e}$=(8,.2,-1,.9,...,1)}};
\node [anchor=north] (bottom2) at ([yshift=0.3em]bottom1.south) {\scriptsize{$\mathbi{o}$=(0,0,1,0,...,0)}};
\node [anchor=south] (top2) at ([yshift=-0.3em]top1.north) {\small{单词$w$的分布式表示}};
{
\node [anchor=north west,fill=red!20!white] (cmatrix) at ([xshift=3em,yshift=1.0em]c.north east) {\scriptsize{$\begin{pmatrix} 1 & .2 & -.2 & 8 & ... & 0 \\ .6 & .8 & -2 & 1 & ... & -.2 \\ 8 & .2 & -1 & .9 & ... & 2.3 \\ 1 & 1.2 & -.9 & 3 & ... & .2 \\ ... & ... & ... & ... & ... & ... \\ 1 & .3 & 3 & .9 & ... & 5.1 \end{pmatrix}$}};
\node [anchor=west,inner sep=2pt,fill=red!30!white] (c) at (e.east) {\small{$\vectorn{\emph{C}}$}};
\node [anchor=west,inner sep=2pt,fill=red!30!white] (c) at (e.east) {\small{$\mathbi{C}$}};
\draw [<-,thick] (c.east) -- ([xshift=3em]c.east);
}
{
\node [anchor=south,draw,fill=green!20!white] (e2) at ([yshift=1.5em]cmatrix.north) {\scriptsize{外部词嵌入系统得到的$\vectorn{\emph{C}}$}};
\node [anchor=south,draw,fill=green!20!white] (e2) at ([yshift=1.5em]cmatrix.north) {\scriptsize{外部词嵌入系统得到的$\mathbi{C}$}};
\draw [->,very thick,dashed] (e2.south) -- (cmatrix.north);
}
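% The box computes $\mathbi{e}=\mathbi{o}\mathbi{C}$: multiplying a one-hot row vector by the embedding matrix simply selects one row of $\mathbi{C}$. A minimal NumPy sketch (the size and values of $\mathbi{C}$ are assumptions):

import numpy as np

C = np.random.randn(6, 5)     # embedding matrix C: 6 words, 5-dimensional rows
o = np.zeros(6)               # one-hot representation o = (0,0,1,0,...,0)
o[2] = 1.0

e = o @ C                     # distributed representation e = o C
assert np.allclose(e, C[2])   # identical to reading row 2 of C directly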
......
% Image diff: Chapter9/Figures/figure-feature-engineering.jpg (132 KB -> 93.1 KB)
......@@ -11,7 +11,7 @@
\draw [-,ublue] (n11.west) -- (n11.east);
\node [anchor=north] (x1) at ([yshift=-6em]n11.south) {$x_1$};
\node [anchor=north] (labela) at ([xshift=3.5em,yshift=-0.5em]x1.south) {\footnotesize{(a) 拟合一小段函数}};
\node [anchor=north] (b) at ([yshift=-6em]n10.south) {$\vectorn{\emph{b}}$};
\node [anchor=north] (b) at ([yshift=-6em]n10.south) {$\mathbi{b}$};
{
\draw [->,thick,red] (b.north) -- ([yshift=-0.1em]n10.south);
\draw [->,thick,ugreen] (x1.north) -- ([yshift=-0.1em]n10.290);
......@@ -93,7 +93,7 @@
\draw [-,ublue] (n11.west) -- (n11.east);
\node [anchor=north] (x1) at ([yshift=-6em]n11.south) {$x_1$};
\node [anchor=north] (labelb) at ([xshift=6em,yshift=-0.5em]x1.south) {\footnotesize{(b) 拟合更大一段函数}};
\node [anchor=north] (b) at ([yshift=-6em]n10.south) {$\vectorn{\emph{b}}$};
\node [anchor=north] (b) at ([yshift=-6em]n10.south) {$\mathbi{b}$};
{
\draw [->,thick,red] (b.north) -- ([yshift=-0.1em]n10.south);
\draw [->,thick,ugreen] (x1.north) -- ([yshift=-0.1em]n10.290);
......
%%%------------------------------------------------------------------------------------------------------------
\begin{tikzpicture}
\begin{scope}
\node [anchor=center,draw,fill=red!20,minimum height=1.8em,minimum width=2.5em] (h) at (0,0) {${\vectorn{\emph{h}}}^{k-1}$};
\node [anchor=west,draw,fill=blue!20,minimum height=1.8em,minimum width=2.5em] (s) at ([xshift=6em]h.east) {${\vectorn{\emph{s}}}^{k}$};
\node [anchor=west,draw,fill=green!20,minimum height=1.8em,minimum width=2.5em] (h2) at ([xshift=6em]s.east) {${\vectorn{\emph{h}}}^{k}$};
\node [anchor=center,draw,fill=red!20,minimum height=1.8em,minimum width=2.5em] (h) at (0,0) {${\mathbi{h}}^{k-1}$};
\node [anchor=west,draw,fill=blue!20,minimum height=1.8em,minimum width=2.5em] (s) at ([xshift=6em]h.east) {${\mathbi{s}}^{k}$};
\node [anchor=west,draw,fill=green!20,minimum height=1.8em,minimum width=2.5em] (h2) at ([xshift=6em]s.east) {${\mathbi{h}}^{k}$};
\node [anchor=east] (prev) at ([xshift=-2em]h.west) {...};
\node [anchor=west] (next) at ([xshift=2em]h2.east) {...};
\draw [->,thick] ([xshift=0.1em]prev.east) -- ([xshift=-0.1em]h.west);
\draw [->,thick] ([xshift=0.1em]h.east) -- ([xshift=-0.1em]s.west) node [pos=0.5,above] {\scriptsize{${\vectorn{\emph{s}}}^k = {\vectorn{\emph{h}}}^{k-1}{\vectorn{\emph{W}}}^k$}};
\draw [->,thick] ([xshift=0.1em]s.east) -- ([xshift=-0.1em]h2.west) node [pos=0.5,above] {\scriptsize{${\vectorn{\emph{h}}}^k = f^k({\vectorn{\emph{s}}}^{k})$}};
\draw [->,thick] ([xshift=0.1em]h.east) -- ([xshift=-0.1em]s.west) node [pos=0.5,above] {\scriptsize{${\mathbi{s}}^k = {\mathbi{h}}^{k-1}{\mathbi{W}}^k$}};
\draw [->,thick] ([xshift=0.1em]s.east) -- ([xshift=-0.1em]h2.west) node [pos=0.5,above] {\scriptsize{${\mathbi{h}}^k = f^k({\mathbi{s}}^{k})$}};
\draw [->,thick] ([xshift=0.1em]h2.east) -- ([xshift=-0.1em]next.west);
......
%%%------------------------------------------------------------------------------------------------------------
\begin{tikzpicture}
\begin{scope}
\node [anchor=center,minimum height=1.7em,fill=yellow!20,draw] (h) at (0,0) {${\bm h}^{K-1}$};
\node [anchor=west,minimum height=1.7em,fill=blue!20,draw] (s) at ([xshift=5.5em]h.east) {${\bm s}^{K}$};
\node [anchor=west,minimum height=1.7em,fill=green!20,draw] (h2) at ([xshift=5.5em]s.east) {${\bm h}^{K}$};
\node [anchor=center,minimum height=1.7em,fill=yellow!20,draw] (h) at (0,0) {$\mathbi{h}^{K-1}$};
\node [anchor=west,minimum height=1.7em,fill=blue!20,draw] (s) at ([xshift=5.5em]h.east) {$\mathbi{s}^{K}$};
\node [anchor=west,minimum height=1.7em,fill=green!20,draw] (h2) at ([xshift=5.5em]s.east) {$\mathbi{h}^{K}$};
\node [anchor=west,minimum height=1.7em,fill=orange!20,draw] (l) at ([xshift=5.5em]h2.east) {$L$};
\draw [->] (h.east) -- (s.west);
\draw [->] (s.east) -- (h2.west);
\draw [->] (h2.east) -- (l.west) node [pos=0.5,above] {\tiny{损失}};
\node [anchor=south west,inner sep=2pt] (step100) at ([xshift=0.2em,yshift=-0.8em]h.north east) {\tiny{${\bm s}^K = {\bm h}^{K-1} {\bm W}^K$}};
\node [anchor=south west,inner sep=2pt] (step100) at ([xshift=0.2em,yshift=-0.8em]h.north east) {\tiny{$\mathbi{s}^K = \mathbi{h}^{K-1} \mathbi{W}^K$}};
\node [anchor=south west,inner sep=2pt] (step101) at ([xshift=1em]step100.north west) {\tiny{线性变换}};
\node [anchor=south west,inner sep=2pt] (step200) at ([xshift=0.5em,yshift=-0.8em]s.north east) {\tiny{${\bm h}^K = f^K({\bm s}^K)$}};
\node [anchor=south west,inner sep=2pt] (step200) at ([xshift=0.5em,yshift=-0.8em]s.north east) {\tiny{$\mathbi{h}^K = f^K(\mathbi{s}^K)$}};
\node [anchor=south west,inner sep=2pt] (step201) at ([xshift=1em]step200.north west) {\tiny{激活函数}};
\node [anchor=south,inner sep=1pt] (outputlabel) at ([yshift=0.0em]h2.north) {\tiny{\textbf{输出层}}};
......
......@@ -19,7 +19,7 @@
\node [anchor=north] (x\n) at ([yshift=-2em]neuron0\n.south) {$x_\n$};
}
\node [anchor=west] (w1label) at ([xshift=-0.5em,yshift=0.8em]x5.north east) {${\vectorn{\emph{W}}}^{[1]}$};
\node [anchor=west] (w1label) at ([xshift=-0.5em,yshift=0.8em]x5.north east) {${\mathbi{W}}^{[1]}$};
\begin{pgfonlayer}{background}
\node [rectangle,inner sep=0.2em,fill=red!20] [fit = (neuron01) (neuron05)] (layer01) {};
......@@ -47,7 +47,7 @@
}
}
\node [anchor=west] (w2label) at ([xshift=-2.5em,yshift=5.4em]x5.north east) {${\vectorn{\emph{W}}}^{[2]}$};
\node [anchor=west] (w2label) at ([xshift=-2.5em,yshift=5.4em]x5.north east) {${\mathbi{W}}^{[2]}$};
\begin{pgfonlayer}{background}
{
......@@ -77,7 +77,7 @@
\draw [<-,thick] ([yshift=1.1em]neuron2\n.north) -- (neuron2\n.north);
}
\node [anchor=west] (w3label) at ([xshift=-2.5em,yshift=9.5em]x5.north east) {${\vectorn{\emph{W}}}^{[3]}$};
\node [anchor=west] (w3label) at ([xshift=-2.5em,yshift=9.5em]x5.north east) {${\mathbi{W}}^{[3]}$};
\begin{pgfonlayer}{background}
{
......
......@@ -5,7 +5,7 @@
% parameter server + processor
\begin{scope}[]
{\scriptsize
{\footnotesize
\tikzstyle{parametershard} = [draw,thick,minimum width=4em,align=left,rounded corners=2pt]
......@@ -13,31 +13,31 @@
\node[parametershard,anchor=west,fill=yellow!10] (param1) at (0,0) {${\bm \theta}_o$};
\node (param2) at ([xshift=1em]param1.east) {};
\node[parametershard,anchor=west,fill=red!10] (param3) at ([xshift=1em]param2.east) {${\bm \theta}_h$};
\node[anchor=south,inner sep=1pt] (serverlabel) at ([yshift=0.2em]param2.north) {\footnotesize{\textbf{参数服务器}: ${\bm \theta}_{\textrm{new}} = {\bm \theta} - \alpha\cdot \frac{\partial J}{\partial {\bm \theta}}$}};
\node[anchor=south,inner sep=1pt] (serverlabel) at ([yshift=0.8em]param2.north) {\small{\textbf{参数服务器}: ${\bm \theta}_{\textrm {new}} = {\bm \theta} - \alpha\cdot \frac{\partial J}{\partial {\bm \theta}}$}};
}
\begin{pgfonlayer}{background}
{
\node[rectangle,draw,thick,inner sep=2pt,fill=gray!20] [fit = (param1) (param2) (param3) (serverlabel)] (serverbox) {};
\node[rectangle,draw,thick,inner sep=2pt,fill=gray!20] [fit = (param1) (param2) (param3) (serverlabel) ] (serverbox) {};
}
\end{pgfonlayer}
\tikzstyle{processor} = [draw,thick,fill=orange!20,minimum width=4em,align=left,rounded corners=2pt]
{
\node [processor,anchor=north,align=center] (processor2) at ([yshift=-1.2in]serverlabel.south) {\scriptsize{处理器 2}\\\scriptsize{(G2)}};
\node [anchor=north] (labela) at ([xshift=4em,yshift=-1em]processor2.south) {\footnotesize {(a)同步更新}};
\node [processor,anchor=east,align=center] (processor1) at ([xshift=-1em]processor2.west) {\scriptsize{处理器 1}\\\scriptsize{(G1)}};
\node [processor,anchor=west,align=center] (processor3) at ([xshift=1em]processor2.east) {\scriptsize{处理器 3}\\\scriptsize{(G3)}};
\node [processor,anchor=north,align=center] (processor2) at ([yshift=-1.2in]serverlabel.south) {\footnotesize{处理器 2}\\\footnotesize{(G2)}};
\node [anchor=north] (labela) at ([xshift=6.5em,yshift=-2em]processor2.south) {\small {(a)同步更新}};
\node [processor,anchor=east,align=center] (processor1) at ([xshift=-1em]processor2.west) {\footnotesize{处理器 1}\\\footnotesize{(G1)}};
\node [processor,anchor=west,align=center] (processor3) at ([xshift=1em]processor2.east) {\footnotesize{处理器 3}\\\footnotesize{(G3)}};
}
{
\draw[->,very thick,red] ([xshift=-0.5em,yshift=2pt]processor2.north) -- ([xshift=-0.5em,yshift=-2pt]serverbox.south) node [pos=0.5,align=right,xshift=-2em] (pushlabel) {\scriptsize{$\frac{\partial J}{\partial{\bm \theta}}$}};;
\draw[<-,very thick,blue] ([xshift=0.5em,yshift=2pt]processor2.north) -- ([xshift=0.5em,yshift=-2pt]serverbox.south) node [pos=0.5,align=left,xshift=2.2em] (fetchlabel) {\scriptsize{${\bm \theta}_{\textrm{new}}$}};;;
\draw[->,very thick,red] ([xshift=-0.5em,yshift=2pt]processor2.north) -- ([xshift=-0.5em,yshift=-2pt]serverbox.south) node [pos=0.5,align=right,xshift=-2em] (pushlabel) {\footnotesize{$\frac{\partial J}{\partial{\bm \theta}}$}};;
\draw[<-,very thick,blue] ([xshift=0.5em,yshift=2pt]processor2.north) -- ([xshift=0.5em,yshift=-2pt]serverbox.south) node [pos=0.5,align=left,xshift=2.2em] (fetchlabel) {\footnotesize{${\bm \theta}_{\textrm{new}}$}};;;
\draw[->,very thick,red] ([xshift=-0.5em,yshift=2pt]processor3.north) --
([xshift=3em,yshift=-2pt]serverbox.south);
\draw[<-,very thick,blue] ([xshift=0.5em,yshift=2pt]processor3.north) -- ([xshift=4em,yshift=-2pt]serverbox.south) node [pos=0.5,align=left,xshift=2.2em] (fetchlabel) {\scriptsize{fetch (F)}};
\draw[->,very thick,red] ([xshift=-0.5em,yshift=2pt]processor1.north) -- ([xshift=-4em,yshift=-2pt]serverbox.south) node [pos=0.5,align=right,xshift=-2em] (pushlabel) {\scriptsize{push (P)}};
\draw[<-,very thick,blue] ([xshift=0.5em,yshift=2pt]processor3.north) -- ([xshift=4em,yshift=-2pt]serverbox.south) node [pos=0.49,align=left,xshift=2.2em] (fetchlabel) {\footnotesize{fetch (F)}};
\draw[->,very thick,red] ([xshift=-0.5em,yshift=2pt]processor1.north) -- ([xshift=-4em,yshift=-2pt]serverbox.south) node [pos=0.5,align=right,xshift=-2em] (pushlabel) {\footnotesize{push (P)}};
\draw[<-,very thick,blue] ([xshift=0.5em,yshift=2pt]processor1.north) -- ([xshift=-3em,yshift=-2pt]serverbox.south);
}
......@@ -45,18 +45,18 @@
% synchronous mode
\tikzstyle{job} = [draw,rotate=90,minimum height=0.25in]
\scriptsize{
\footnotesize{
{
\node[job,anchor=south west,fill=blue!50] (fetch11) at ([xshift=6em,yshift=1em]processor3.east) {\textbf{F}};
\node[job,anchor=west,fill=orange!30] (minibatch11) at ([yshift=1pt]fetch11.east) {\tiny{minibatch3}};
\node[job,anchor=south west,fill=blue!50] (fetch11) at ([xshift=6em,yshift=-0.2em]processor3.east) {\textbf{F}};
\node[job,anchor=west,fill=orange!30] (minibatch11) at ([yshift=1pt]fetch11.east) {\scriptsize{minibatch3}};
\node[job,anchor=west,fill=red!50] (push11) at ([yshift=1pt]minibatch11.east) {\textbf{P}};
\node[job,anchor=north west,fill=blue!50] (fetch12) at ([xshift=0.8em]fetch11.south west) {\textbf{F}};
\node[job,anchor=west,fill=orange!30] (minibatch12) at ([yshift=1pt]fetch12.east) {\tiny{minibatch2}};
\node[job,anchor=west,fill=orange!30] (minibatch12) at ([yshift=1pt]fetch12.east) {\scriptsize{minibatch2}};
\node[job,anchor=west,fill=red!50] (push12) at ([yshift=1pt]minibatch12.east) {\textbf{P}};
\node[job,anchor=north west,fill=blue!50] (fetch13) at ([xshift=0.8em]fetch12.south west) {\textbf{F}};
\node[job,anchor=west,fill=orange!30,minimum width=8em] (minibatch13) at ([yshift=1pt]fetch13.east) {\scriptsize{minibatch1}};
\node[job,anchor=west,fill=orange!30,minimum width=8em] (minibatch13) at ([yshift=1pt]fetch13.east) {\footnotesize{minibatch1}};
\node[job,anchor=west,fill=red!50] (push13) at ([yshift=1pt]minibatch13.east) {\textbf{P}};
\node[anchor=south west,draw,fill=gray!20,minimum width=8.0em] (update11) at ([yshift=4.0em]push11.north east) {更新};
\node[anchor=south west,draw,fill=gray!20,minimum width=8.0em] (update11) at ([yshift=3.6em]push11.north east) {更新};
\node[anchor=north] (G11) at (fetch11.west) {\small{G3}};
\node[anchor=north] (G12) at (fetch12.west) {\small{G2}};
......@@ -68,12 +68,12 @@
{
\draw [<->,thin,dotted] ([xshift=-1pt]minibatch11.north) .. controls +(west:3em) and +(east:3em) .. ([xshift=1pt]processor3.east);
\draw [<->,thin,dotted] ([xshift=-1pt]fetch11.north) .. controls +(west:4em) and +(east:4em) .. ([xshift=-0.5em,yshift=0.3in]processor3.north);
\draw [<->,thin,dotted] ([xshift=-1pt]push11.north) -- ([xshift=-4em,yshift=0.8in]processor3.north);
\draw [<->,thin,dotted] ([xshift=-1pt]fetch11.north) .. controls +(west:4em) and +(east:4em) .. ([xshift=0em,yshift=0.3in]processor3.north);
\draw [<->,thin,dotted] ([xshift=-1pt]push11.north) -- ([xshift=-2.2em,yshift=0.8in]processor3.north);
}
{
\draw [<->,thin,dotted] ([xshift=-1pt]update11.west) -- ([xshift=1pt,yshift=-1.5em]serverbox.north east);
\draw [<->,thin,dotted] ([xshift=-1pt]update11.west) -- ([xshift=1pt,yshift=-1.28em]serverbox.north east);
}
......@@ -83,7 +83,7 @@
\begin{scope}[yshift=-2.5in]
{\scriptsize
{\footnotesize
\tikzstyle{parametershard} = [draw,thick,minimum width=4em,align=left,rounded corners=2pt]
......@@ -91,7 +91,7 @@
\node[parametershard,anchor=west,fill=yellow!10] (param1) at (0,0) {${\bm \theta}_o$};
\node (param2) at ([xshift=1em]param1.east) {};
\node[parametershard,anchor=west,fill=red!10] (param3) at ([xshift=1em]param2.east) {${\bm \theta}_h$};
\node[anchor=south,inner sep=1pt] (serverlabel) at ([yshift=0.2em]param2.north) {\footnotesize{\textbf{参数服务器}: ${\bm \theta}_{\textrm {new}} = {\bm \theta} - \alpha\cdot \frac{\partial J}{\partial {\bm \theta}}$}};
\node[anchor=south,inner sep=1pt] (serverlabel) at ([yshift=0.8em]param2.north) {\small{\textbf{参数服务器}: ${\bm \theta}_{\textrm {new}} = {\bm \theta} - \alpha\cdot \frac{\partial J}{\partial {\bm \theta}}$}};
}
\begin{pgfonlayer}{background}
......@@ -103,19 +103,19 @@
\tikzstyle{processor} = [draw,thick,fill=orange!20,minimum width=4em,align=left,rounded corners=2pt]
{
\node [processor,anchor=north,align=center] (processor2) at ([yshift=-1.2in]serverlabel.south) {\scriptsize{处理器 2}\\\scriptsize{(G2)}};
\node [anchor=north] (label) at ([xshift=4em,yshift=-1em]processor2.south) {\footnotesize {(b)异步更新}};
\node [processor,anchor=east,align=center] (processor1) at ([xshift=-1em]processor2.west) {\scriptsize{处理器 1}\\\scriptsize{(G1)}};
\node [processor,anchor=west,align=center] (processor3) at ([xshift=1em]processor2.east) {\scriptsize{处理器 3}\\\scriptsize{(G3)}};
\node [processor,anchor=north,align=center] (processor2) at ([yshift=-1.2in]serverlabel.south) {\footnotesize{处理器 2}\\\footnotesize{(G2)}};
\node [anchor=north] (label) at ([xshift=6.5em,yshift=-2em]processor2.south) {\small {(b)异步更新}};
\node [processor,anchor=east,align=center] (processor1) at ([xshift=-1em]processor2.west) {\footnotesize{处理器 1}\\\footnotesize{(G1)}};
\node [processor,anchor=west,align=center] (processor3) at ([xshift=1em]processor2.east) {\footnotesize{处理器 3}\\\footnotesize{(G3)}};
}
{
\draw[->,very thick,red] ([xshift=-0.5em,yshift=2pt]processor2.north) -- ([xshift=-0.5em,yshift=-2pt]serverbox.south) node [pos=0.5,align=right,xshift=-2em] (pushlabel) {\scriptsize{$\frac{\partial J}{\partial {\bm \theta}}$}};;
\draw[<-,very thick,blue] ([xshift=0.5em,yshift=2pt]processor2.north) -- ([xshift=0.5em,yshift=-2pt]serverbox.south) node [pos=0.5,align=left,xshift=2.2em] (fetchlabel) {\scriptsize{${\bm \theta}_{\textrm{new}}$}};;;
\draw[->,very thick,red] ([xshift=-0.5em,yshift=2pt]processor2.north) -- ([xshift=-0.5em,yshift=-2pt]serverbox.south) node [pos=0.5,align=right,xshift=-2em] (pushlabel) {\footnotesize{$\frac{\partial J}{\partial {\bm \theta}}$}};;
\draw[<-,very thick,blue] ([xshift=0.5em,yshift=2pt]processor2.north) -- ([xshift=0.5em,yshift=-2pt]serverbox.south) node [pos=0.5,align=left,xshift=2.2em] (fetchlabel) {\footnotesize{${\bm \theta}_{\textrm{new}}$}};;;
\draw[->,very thick,red] ([xshift=-0.5em,yshift=2pt]processor3.north) --
([xshift=3em,yshift=-2pt]serverbox.south);
\draw[<-,very thick,blue] ([xshift=0.5em,yshift=2pt]processor3.north) -- ([xshift=4em,yshift=-2pt]serverbox.south) node [pos=0.5,align=left,xshift=2.2em] (fetchlabel) {\scriptsize{fetch (F)}};
\draw[->,very thick,red] ([xshift=-0.5em,yshift=2pt]processor1.north) -- ([xshift=-4em,yshift=-2pt]serverbox.south) node [pos=0.5,align=right,xshift=-2em] (pushlabel) {\scriptsize{push (P)}};
\draw[<-,very thick,blue] ([xshift=0.5em,yshift=2pt]processor3.north) -- ([xshift=4em,yshift=-2pt]serverbox.south) node [pos=0.49,align=left,xshift=2.2em] (fetchlabel) {\footnotesize{fetch (F)}};
\draw[->,very thick,red] ([xshift=-0.5em,yshift=2pt]processor1.north) -- ([xshift=-4em,yshift=-2pt]serverbox.south) node [pos=0.5,align=right,xshift=-2em] (pushlabel) {\footnotesize{push (P)}};
\draw[<-,very thick,blue] ([xshift=0.5em,yshift=2pt]processor1.north) -- ([xshift=-3em,yshift=-2pt]serverbox.south);
}
......@@ -123,19 +123,19 @@
% synchronous mode
\tikzstyle{job} = [draw,rotate=90,minimum height=0.25in]
\scriptsize{
\footnotesize{
{
\node[job,anchor=south west,fill=blue!50] (fetch21) at ([xshift=6em,yshift=1em]processor3.east) {\textbf{F}};
\node[job,anchor=west,fill=orange!30] (minibatch21) at ([yshift=1pt]fetch21.east) {\tiny{minibatch3}};
\node[job,anchor=south west,fill=blue!50] (fetch21) at ([xshift=6em,yshift=-0.3em]processor3.east) {\textbf{F}};
\node[job,anchor=west,fill=orange!30] (minibatch21) at ([yshift=1pt]fetch21.east) {\scriptsize{minibatch3}};
\node[job,anchor=west,fill=red!50] (push21) at ([yshift=1pt]minibatch21.east) {\textbf{P}};
\node[job,anchor=north west,fill=blue!50] (fetch22) at ([xshift=0.8em]fetch21.south west) {\textbf{F}};
\node[job,anchor=west,fill=orange!30] (minibatch22) at ([yshift=1pt]fetch22.east) {\tiny{minibatch2}};
\node[job,anchor=west,fill=orange!30] (minibatch22) at ([yshift=1pt]fetch22.east) {\scriptsize{minibatch2}};
\node[job,anchor=west,fill=red!50] (push22) at ([yshift=1pt]minibatch22.east) {\textbf{P}};
\node[job,anchor=north west,fill=blue!50] (fetch23) at ([xshift=0.8em]fetch22.south west) {\textbf{F}};
\node[job,anchor=west,fill=orange!30,minimum width=8em] (minibatch23) at ([yshift=1pt]fetch23.east) {\scriptsize{minibatch1}};
\node[job,anchor=west,fill=orange!30,minimum width=8em] (minibatch23) at ([yshift=1pt]fetch23.east) {\footnotesize{minibatch1}};
\node[job,anchor=west,fill=red!50] (push23) at ([yshift=1pt]minibatch23.east) {\textbf{P}};
\node[anchor=south west,draw,fill=gray!20,minimum width=0.59in] (update21) at ([yshift=2pt]push21.north east) {更新};
\node[anchor=south west,draw,fill=gray!20,minimum width=0.25in] (update22) at ([yshift=2pt]push23.north east) {\tiny{更新}};
\node[anchor=south west,draw,fill=gray!20,minimum width=0.25in] (update22) at ([yshift=2pt]push23.north east) {\scriptsize{更新}};
\node[anchor=north] (G21) at (fetch21.west) {\small{G3}};
\node[anchor=north] (G22) at (fetch22.west) {\small{G2}};
......@@ -143,7 +143,7 @@
\node[anchor=north,align=center] (synlabel) at (G22.south) {\small{\textbf{异步更新}}};
\draw[->,thick] ([xshift=1em]G23.east) -- ([xshift=1em,yshift=1.4in]G23.east) node [pos=0.5,rotate=90,yshift=-1em] {\small{时间轴}};
\draw [<->,thin,dotted] ([xshift=-1pt]update21.west) -- ([xshift=1pt,yshift=-1.5em]serverbox.north east);
\draw [<->,thin,dotted] ([xshift=-1pt]update21.west) -- ([xshift=1pt,yshift=-1.55em]serverbox.north east);
\draw [<->,thin,dotted] ([xshift=-1pt]update22.west) -- ([xshift=1pt,yshift=-1.5em]serverbox.north east);
}
}
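% Both panels implement ${\bm \theta}_{\textrm{new}} = {\bm \theta} - \alpha\cdot \frac{\partial J}{\partial {\bm \theta}}$ on the server; they differ only in when pushed gradients are applied. A toy single-process sketch of the synchronous mode in panel (a) follows; the quadratic loss and all names are assumptions for illustration.

import numpy as np

alpha = 0.1
theta = np.zeros(4)                                      # server-side parameters
minibatches = [np.random.randn(8, 4) for _ in range(3)]  # one batch per worker G1..G3

def local_gradient(theta, batch):
    # toy loss J = mean ||theta - x||^2 over the minibatch
    return 2.0 * (theta - batch.mean(axis=0))

for step in range(10):
    # synchronous mode: wait for every worker's push (P), then update once
    grads = [local_gradient(theta, b) for b in minibatches]  # fetch (F) + compute + push (P)
    theta = theta - alpha * np.mean(grads, axis=0)           # single server update

% In the asynchronous mode of panel (b), the server would instead apply each pushed gradient as soon as it arrives, without waiting for the other workers.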
......
......@@ -10,15 +10,15 @@
\node [anchor=north,draw,thick](node2)at ([yshift=-1.2em]node3.south){\small{weight layer}};
\draw[->,thick](node2.north)--([yshift=0.35em]node3.south);
\node[anchor=west](node2-1) at ([xshift=2.1em,yshift=1.2em]node2.east) {${\bm x}$};
\node[anchor=west](node2-1) at ([xshift=2.1em,yshift=1.2em]node2.east) {$\mathbi{x}$};
\node[anchor=north](node2-2) at ([xshift=0.2em,yshift=-0.3em]node2-1.south) {\footnotesize{$\mathrm{identity}$}};
\node [anchor=east](node4) at ([xshift=-0.2em]node2.west) {$\textrm{F}({\bm x})$};
\node [anchor=east](node5) at ([xshift=-0.3em]node3.west) {$\textrm{F}({\bm x})+{\bm x}$};
\node [anchor=east](node4) at ([xshift=-0.2em]node2.west) {$\textrm{F}(\mathbi{x})$};
\node [anchor=east](node5) at ([xshift=-0.3em]node3.west) {$\textrm{F}(\mathbi{x})+\mathbi{x}$};
\node [anchor=north](node1) at ([yshift=-1.8em]node2.south) {};
\draw[->,thick]([yshift=0.0em]node1.north)--(node2.south);
\node [anchor=east](node1-1) at ([xshift=1em,yshift=0.4em]node1.east) {${\bm x}$};
\node [anchor=east](node1-1) at ([xshift=1em,yshift=0.4em]node1.east) {$\mathbi{x}$};
\draw[->,thick]([xshift=-1.3em,yshift=0.8em]node1-1.east)--([xshift=2.7em,yshift=0.8em]node1-1.east)--([xshift=2.7em,yshift=5.35em]node1-1.east)--([xshift=-0.4em]node3.east);
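% The figure is the standard residual connection: the block output is $\textrm{F}(\mathbi{x})+\mathbi{x}$, with the identity path carrying $\mathbi{x}$ around the weight layers unchanged. A minimal sketch, assuming a single ReLU weight layer for $\textrm{F}$:

import numpy as np

def F(x, W):
    return np.maximum(0.0, x @ W)   # one weight layer with a ReLU

x = np.random.randn(1, 4)
W = np.random.randn(4, 4)
y = F(x, W) + x                     # residual output F(x) + x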
......
......@@ -7,14 +7,14 @@
\node [anchor=west,rnnnode] (node13) at ([xshift=2em]node12.east) {\scriptsize{RNN Cell}};
\node [anchor=west,rnnnode] (node14) at ([xshift=2em]node13.east) {\scriptsize{RNN Cell}};
}
\node [anchor=north,rnnnode,fill=red!30!white] (e1) at ([yshift=-1.2em]node11.south) {\tiny{${\bm e}_1={\bm o}_1{\bm C}$}};
\node [anchor=north,rnnnode,fill=red!30!white] (e2) at ([yshift=-1.2em]node12.south) {\tiny{${\bm e}_2={\bm wo}_2{\bm C}$}};
\node [anchor=north,rnnnode,fill=red!30!white] (e3) at ([yshift=-1.2em]node13.south) {\tiny{${\bm e}_3={\bm o}_3{\bm C}$}};
\node [anchor=north,rnnnode,fill=red!30!white] (e4) at ([yshift=-1.2em]node14.south) {\tiny{${\bm e}_4={\bm o}_4{\bm C}$}};
\node [anchor=north] (w1) at ([yshift=-1em]e1.south) {\footnotesize{${\bm o}_1$}};
\node [anchor=north] (w2) at ([yshift=-1em]e2.south) {\footnotesize{${\bm o}_2$}};
\node [anchor=north] (w3) at ([yshift=-1em]e3.south) {\footnotesize{${\bm o}_3$}};
\node [anchor=north] (w4) at ([yshift=-1em]e4.south) {\footnotesize{${\bm o}_4$}};
\node [anchor=north,rnnnode,fill=red!30!white] (e1) at ([yshift=-1.2em]node11.south) {\tiny{${\mathbi{e}}_1={\mathbi{o}}_1{\mathbi{C}}$}};
\node [anchor=north,rnnnode,fill=red!30!white] (e2) at ([yshift=-1.2em]node12.south) {\tiny{${\mathbi{e}}_2={\mathbi{o}}_2{\mathbi{C}}$}};
\node [anchor=north,rnnnode,fill=red!30!white] (e3) at ([yshift=-1.2em]node13.south) {\tiny{${\mathbi{e}}_3={\mathbi{o}}_3{\mathbi{C}}$}};
\node [anchor=north,rnnnode,fill=red!30!white] (e4) at ([yshift=-1.2em]node14.south) {\tiny{${\mathbi{e}}_4={\mathbi{o}}_4{\mathbi{C}}$}};
\node [anchor=north] (w1) at ([yshift=-1em]e1.south) {\footnotesize{${\mathbi{o}}_1$}};
\node [anchor=north] (w2) at ([yshift=-1em]e2.south) {\footnotesize{${\mathbi{o}}_2$}};
\node [anchor=north] (w3) at ([yshift=-1em]e3.south) {\footnotesize{${\mathbi{o}}_3$}};
\node [anchor=north] (w4) at ([yshift=-1em]e4.south) {\footnotesize{${\mathbi{o}}_4$}};
\draw [->,thick] ([yshift=0.1em]w1.north)--([yshift=-0.1em]e1.south);
\draw [->,thick] ([yshift=0.1em]w2.north)--([yshift=-0.1em]e2.south);
......
......@@ -9,7 +9,7 @@
\node [anchor=center,neuronnode] (neuron02) at ([yshift=-3em]neuron01) {};
\node [anchor=east] (x0) at ([xshift=-6em]neuron00.west) {$x_1$};
\node [anchor=east] (x1) at ([xshift=-6em]neuron01.west) {$x_2$};
\node [anchor=east] (x2) at ([xshift=-6em]neuron02.west) {${\vectorn{\emph{b}}}$};
\node [anchor=east] (x2) at ([xshift=-6em]neuron02.west) {$\mathbi{b}$};
\node [anchor=west] (y0) at ([xshift=4em]neuron00.east) {$y_1$};
\draw [->] (x0.east) -- (neuron00.180) node [pos=0.1,above] {\tiny{$w_{11}$}};
......@@ -29,10 +29,10 @@
\draw [->] (x2.east) -- (neuron02.180) node [pos=0.3,below] {\tiny{$b_{3}$}};
\draw [->] (neuron02.east) -- (y2.west);
\node [anchor=east,align=left] (inputlabel) at ([xshift=-0.1em]x1.west) {\scriptsize{输入向量}:\\\small{${\vectorn{\emph{x}}}=(x_1,x_2)$}};
\node [anchor=east,align=left] (inputlabel) at ([xshift=-0.1em]x1.west) {\scriptsize{输入向量}:\\\small{$\mathbi{x}=(x_1,x_2)$}};
\node [anchor=west,align=left] (outputlabel) at ([xshift=0.1em]y1.east) {\scriptsize{输出向量}:\\\small{${\vectorn{\emph{y}}}=(y_1,y_2,y_3)$}};
\node [anchor=west,align=left] (outputlabel) at ([xshift=0.1em]y1.east) {\scriptsize{输出向量}:\\\small{$\mathbi{y}=(y_1,y_2,y_3)$}};
\begin{pgfonlayer}{background}
......
......@@ -49,7 +49,7 @@
\node [fill=red!20,inner sep=0pt,minimum height=0.49cm,minimum width=0.49cm] at (\x,\y) {$\number\value{mycount4}$};
\addtocounter{mycount4}{1};
}
\node [anchor=north] (xlabel) at (0,-1.2) {$\vectorn{\emph{x}}$};
\node [anchor=north] (xlabel) at (0,-1.2) {$\mathbi{x}$};
}
\node [anchor=south west] (label14) at (-1.3,0.9) {\footnotesize{\ding{175}}};
\end{scope}
......@@ -65,7 +65,7 @@
\node [fill=black!20,inner sep=0pt,minimum height=0.49cm,minimum width=0.49cm] at (0.25,0.25) {\small{$-1$}};
\node [fill=black!20,inner sep=0pt,minimum height=0.49cm,minimum width=0.49cm] at (0.25,-0.25) {$1$};
\node [fill=black!20,inner sep=0pt,minimum height=0.49cm,minimum width=0.49cm] at (0.25,-0.75) {$0$};
\node [anchor=north] (xlabel) at (0,-1.2) {$\vectorn{\emph{W}}$};
\node [anchor=north] (xlabel) at (0,-1.2) {$\mathbi{W}$};
}
{\draw [->,thick,dashed] (-1.5in+2em+1.5em,-0.3) .. controls +(east:2) and +(west:1) .. (-0.55,0.8) node [pos=0.5,left] {\scriptsize{\textbf{矩阵乘}}};}
......@@ -129,7 +129,7 @@
}
\node [anchor=south west] (label24) at (-0.8,0.9) {\footnotesize{\ding{175}}};
{
\node [anchor=north] (xlabel) at (0,-1.2) {${\vectorn{\emph{x}}} \cdot {\vectorn{\emph{W}}}$};
\node [anchor=north] (xlabel) at (0,-1.2) {${\mathbi{x}} \cdot {\mathbi{W}}$};
\node [anchor=center] (elabel) at (-0.7in,0) {\Huge{$\textbf{=}$}};
}
\end{scope}
......
......@@ -19,7 +19,7 @@
\node[above] at ([xshift=2em,yshift=1em]a2.west){1};
\node[below] at ([xshift=-0.5em,yshift=0em]a2.west){-1};
\node [anchor=west] (x) at ([xshift=-3.5cm,yshift=2em]a2.north) {\scriptsize{
${\vectorn{\emph{W}}}=\begin{pmatrix}
$\mathbi{W}=\begin{pmatrix}
1&0&0\\
0&-1&0\\
0&0&1
......@@ -44,7 +44,7 @@
\node [anchor=west] (x) at ([xshift=-4cm,yshift=2em]a3.north) {\scriptsize{
${\vectorn{\emph{b}}}=\begin{pmatrix}
$\mathbi{b}=\begin{pmatrix}
0.5&0&0\\
0&0&0\\
0&0&0
......
......@@ -11,7 +11,7 @@
\draw [-,ublue] (n10.west) -- (n10.east);
\draw [-,ublue] (n11.west) -- (n11.east);
\node [anchor=north] (x1) at ([yshift=-4em]n11.south) {$x_1$};
\node [anchor=north] (b) at ([yshift=-4em]n10.south) {$\vectorn{\emph{b}}$};
\node [anchor=north] (b) at ([yshift=-4em]n10.south) {$\mathbi{b}$};
{
\draw [->,thick] (b.north) -- ([yshift=-0.1em]n10.south);
\draw [->,thick] (x1.north) -- ([yshift=-0.1em]n10.290);
......
......@@ -15,10 +15,10 @@
\draw [-,thick](part3.south)--(part4.north);
\node [anchor=north,minimum width=1.5em,minimum height=1.5em] (part4-2) at ([xshift=-1.2em,yshift=-0.2em]part4.south) {\scriptsize {$1\times 2$}};
\node [anchor=north,minimum width=4.0em,minimum height=1.5em] (part5) at ([yshift=-1.4em]part4.south) {\footnotesize {${\vectorn{\emph{a}}}$}};
\node [anchor=north,minimum width=4.0em,minimum height=1.5em] (part5) at ([yshift=-1.4em]part4.south) {\footnotesize {$\mathbi{a}$}};
\draw [-,thick](part4.south)--([yshift=-0.1em]part5.north);
%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\node [anchor=west,minimum width=2.0em,minimum height=1.5em] (part5-3) at ([xshift=-0.8em,yshift=0.2em]part5.east) {\footnotesize {${\vectorn{\emph{W}}}^{[2]}$}};
\node [anchor=west,minimum width=2.0em,minimum height=1.5em] (part5-3) at ([xshift=-0.8em,yshift=0.2em]part5.east) {\footnotesize {${\mathbi{W}}^{[2]}$}};
\node [anchor=west,minimum width=2.0em,minimum height=1.5em] (part5-4) at ([xshift=2.0em,yshift=0.0em]part5-3.east) {\footnotesize {$ b^{[2]}$}};
\draw[-,thick](part4.south)--([xshift=-0.5em]part5-3.north);
\draw[-,thick](part3.south)--(part5-4.north);
......@@ -38,11 +38,11 @@
\draw [-,thick](part7.south)--(part8.north);
\node [anchor=north,minimum width=1.5em,minimum height=1.5em] (part8-2) at ([xshift=-1.2em,yshift=-0.2em]part8.south) {\scriptsize{$1\times 3$}};
\node [anchor=north,minimum width=4.0em,minimum height=1.5em] (part9) at ([yshift=-1.4em]part8.south) {\footnotesize {${\vectorn{\emph{x}}}$}};
\node [anchor=north,minimum width=4.0em,minimum height=1.5em] (part9) at ([yshift=-1.4em]part8.south) {\footnotesize {$\mathbi{x}$}};
\draw [-,thick](part8.south)--([yshift=-0.1em]part9.north);
%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\node [anchor=west,minimum width=2.0em,minimum height=1.5em] (part9-3) at ([xshift=-0.8em,yshift=0.1em]part9.east) {\footnotesize {${\vectorn{\emph{W}}}^{[1]}$}};
\node [anchor=west,minimum width=2.0em,minimum height=1.5em] (part9-4) at ([xshift=2.0em,yshift=0.0em]part9-3.east) {\footnotesize {${\vectorn{\emph{b}}}^{[1]}$}};
\node [anchor=west,minimum width=2.0em,minimum height=1.5em] (part9-3) at ([xshift=-0.8em,yshift=0.1em]part9.east) {\footnotesize {${\mathbi{W}}^{[1]}$}};
\node [anchor=west,minimum width=2.0em,minimum height=1.5em] (part9-4) at ([xshift=2.0em,yshift=0.0em]part9-3.east) {\footnotesize {${\mathbi{b}}^{[1]}$}};
\draw[-,thick](part8.south)--([xshift=-0.5em]part9-3.north);
\draw[-,thick](part7.south)--(part9-4.north);
\node [anchor=south,minimum width=1.5em,minimum height=1.5em] (part9-3-1) at ([xshift=1.1em,yshift=-0.45em]part9-3.north) {\scriptsize {$3\times 2$}};
......
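% The shape annotations in the figure ($1\times 3$ input, $3\times 2$ first-layer weights, $1\times 2$ hidden output) can be checked mechanically; a NumPy sketch with assumed values, an assumed tanh activation, and an assumed $2\times 1$ second layer:

import numpy as np

x  = np.random.randn(1, 3)    # input x, shape 1 x 3
W1 = np.random.randn(3, 2)    # first-layer weights W^[1], shape 3 x 2
b1 = np.random.randn(1, 2)    # first-layer bias b^[1]
a  = np.tanh(x @ W1 + b1)     # hidden output a, shape 1 x 2
W2 = np.random.randn(2, 1)    # second-layer weights W^[2] (shape assumed 2 x 1)
b2 = np.random.randn(1, 1)    # second-layer bias b^[2]
y  = a @ W2 + b2              # output, shape 1 x 1
assert a.shape == (1, 2) and y.shape == (1, 1)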
%%%------------------------------------------------------------------------------------------------------------
\begin{tikzpicture}
\tikzstyle{neuronnode} = [minimum size=2.2em,circle,draw,ublue,very thick,inner sep=1pt, fill=white,align=center,drop shadow={shadow xshift=0.1em,shadow yshift=-0.1em}]
\node [anchor=west,minimum width=2.0em,minimum height=1.5em] (bias10) at (0,0.05) {\footnotesize{${\vectorn{\emph{b}}}^{[1]}$}};
\node [anchor=west,minimum width=2.0em,minimum height=1.5em] (bias10) at (0,0.05) {\footnotesize{${\mathbi{b}}^{[1]}$}};
\node [anchor=west,minimum width=2.0em,minimum height=1.5em] (bias11) at ([xshift=-1.5em,yshift=-0.3em]bias10.south) {\footnotesize{偏置1}};
\node [anchor=center,rotate=13,fill=white,inner sep=1pt] (b11) at ([yshift=1.0em,xshift=1.8em]bias10.north) {\scriptsize{$b_{11}$}};
......
......@@ -129,27 +129,14 @@
\parinterval End-to-end learning frees machine learning from traditional feature engineering, and with it from tedious data preprocessing, feature selection, and dimensionality reduction: an artificial neural network extracts and combines increasingly complex features directly from the input data, which greatly improves both model capability and engineering efficiency. Take the image classification task in Figure~\ref{fig:9-2} as an example. The traditional approach requires many processing stages: hand-designed image features are first extracted, their dimensionality is reduced, and a classifier such as an SVM then assigns a label. Compared with this multi-stage, pipeline-style workflow, end-to-end deep learning trains a single neural network whose input is the pixel representation of the image and whose output is directly the class label.
%------------------------------------------------------------------------------
\begin{figure}[htp]
\centering
\subfigcapskip=8pt
\subfigure[基于特征工程的机器学习方法做图像分类]{
\begin{minipage}{.9\textwidth}
\centering
\includegraphics[width=8cm]{./Chapter9/Figures/figure-feature-engineering.jpg}
\end{minipage}%
}
\vfill
\subfigure[端到端学习方法做图像分类]{
\begin{minipage}{.9\textwidth}
\centering
\includegraphics[width=8cm]{./Chapter9/Figures/figure-deep-learning.jpg}
\end{minipage}%
}
%----------------------------------------------
\begin{figure}[htp]
\centering
\input{./Chapter9/Figures/figure-compare}
\caption{Feature engineering {\small\sffamily\bfseries{vs}} end-to-end learning}
\label{fig:9-2}
\end {figure}
%------------------------------------------------------------------------------
\end{figure}
%----------------------------------------------
\parinterval Traditional machine learning relies on manually defined features, a process that typically builds implicit assumptions about the problem into the model. This approach suffers from three problems:
......
......@@ -5508,7 +5508,147 @@ pages ={157-166},
pages = {7057--7067},
year = {2019}
}
@inproceedings{DBLP:conf/aclnmt/HoangKHC18,
author = {Cong Duy Vu Hoang and
Philipp Koehn and
Gholamreza Haffari and
Trevor Cohn},
title = {Iterative Back-Translation for Neural Machine Translation},
pages = {18--24},
publisher = {Association for Computational Linguistics},
year = {2018}
}
@inproceedings{DBLP:conf/icml/OttAGR18,
author = {Myle Ott and
Michael Auli and
David Grangier and
Marc'Aurelio Ranzato},
title = {Analyzing Uncertainty in Neural Machine Translation},
volume = {80},
pages = {3953--3962},
publisher = {{PMLR}},
year = {2018}
}
@inproceedings{DBLP:conf/acl/FadaeeBM17a,
author = {Marzieh Fadaee and
Arianna Bisazza and
Christof Monz},
title = {Data Augmentation for Low-Resource Neural Machine Translation},
pages = {567--573},
publisher = {Association for Computational Linguistics},
year = {2017}
}
@inproceedings{finding2006adafre,
author = {Sisay Fissaha Adafre and Maarten de Rijke},
title = {Finding Similar Sentences across Multiple Languages in Wikipedia},
publisher = {European Association of Computational Linguistics},
year = {2006}
}
@inproceedings{method2008keiji,
author = {Keiji Yasuda and Eiichiro Sumita},
title = {Method for Building Sentence-Aligned Corpus from Wikipedia},
publisher = {AAAI Conference on Artificial Intelligence},
year = {2008}
}
@article{DBLP:journals/coling/MunteanuM05,
author = {Dragos Stefan Munteanu and
Daniel Marcu},
title = {Improving Machine Translation Performance by Exploiting Non-Parallel
Corpora},
journal = {Computational Linguistics},
volume = {31},
number = {4},
pages = {477--504},
year = {2005}
}
@inproceedings{DBLP:conf/naacl/SmithQT10,
author = {Jason R. Smith and
Chris Quirk and
Kristina Toutanova},
title = {Extracting Parallel Sentences from Comparable Corpora using Document
Level Alignment},
pages = {403--411},
publisher = {Annual Meeting of the Association for Computational Linguistics},
year = {2010}
}
@inproceedings{DBLP:conf/emnlp/ZhangZ16,
author = {Jiajun Zhang and
Chengqing Zong},
title = {Exploiting Source-side Monolingual Data in Neural Machine Translation},
pages = {1535--1545},
publisher = {Annual Meeting of the Association for Computational Linguistics},
year = {2016}
}
@inproceedings{DBLP:conf/acl/XiaKAN19,
author = {Mengzhou Xia and
Xiang Kong and
Antonios Anastasopoulos and
Graham Neubig},
title = {Generalized Data Augmentation for Low-Resource Translation},
pages = {5786--5796},
publisher = {Annual Meeting of the Association for Computational Linguistics},
year = {2019}
}
@inproceedings{DBLP:conf/emnlp/WangPDN18,
author = {Xinyi Wang and
Hieu Pham and
Zihang Dai and
Graham Neubig},
title = {SwitchOut: an Efficient Data Augmentation Algorithm for Neural Machine
Translation},
pages = {856--861},
publisher = {Annual Meeting of the Association for Computational Linguistics},
year = {2018}
}
@inproceedings{DBLP:conf/acl/GaoZWXQCZL19,
author = {Fei Gao and
Jinhua Zhu and
Lijun Wu and
Yingce Xia and
Tao Qin and
Xueqi Cheng and
Wengang Zhou and
Tie-Yan Liu},
title = {Soft Contextual Data Augmentation for Neural Machine Translation},
pages = {5539--5544},
publisher = {Annual Meeting of the Association for Computational Linguistics},
year = {2019}
}
@inproceedings{DBLP:conf/emnlp/WangLWLS19,
author = {Shuo Wang and
Yang Liu and
Chao Wang and
Huanbo Luan and
Maosong Sun},
title = {Improving Back-Translation with Uncertainty-based Confidence Estimation},
pages = {791--802},
publisher = {Annual Meeting of the Association for Computational Linguistics},
year = {2019}
}
@inproceedings{DBLP:conf/emnlp/WuWXQLL19,
author = {Lijun Wu and
Yiren Wang and
Yingce Xia and
Tao Qin and
Jianhuang Lai and
Tie-Yan Liu},
title = {Exploiting Monolingual Data at Scale for Neural Machine Translation},
pages = {4205--4215},
publisher = {Annual Meeting of the Association for Computational Linguistics},
year = {2019}
}
@inproceedings{DBLP:conf/emnlp/LiLHZZ19,
author = {Guanlin Li and
Lemao Liu and
Guoping Huang and
Conghui Zhu and
Tiejun Zhao},
title = {Understanding Data Augmentation in Neural Machine Translation: Two
Perspectives towards Generalization},
pages = {5688--5694},
publisher = {Annual Meeting of the Association for Computational Linguistics},
year = {2019}
}
%%%%% chapter 16------------------------------------------------------
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
......