Commit dcd23f27 by 孟霞

Merge branch 'mengxia' into 'caorunzhe'

Mengxia

See merge request !312
parents aeefe507 22175aef
@@ -2283,10 +2283,10 @@ Jobs was the CEO of {\red{\underline{apple}}}.
\begin{itemize}
\vspace{0.5em}
\item End-to-end learning is one of the defining features of neural network approaches. With it, system developers no longer need to design hidden structures for the input and output, and even feature engineering becomes unnecessary. On the other hand, since this kind of end-to-end learning is carried out entirely by the neural network itself, the whole learning process receives no guidance from human prior knowledge, which makes the learned structures and parameters very hard to interpret. To address this problem, many researchers have studied {\small\sffamily\bfseries{Explainable Machine Learning}}\index{Explainable Machine Learning}\upcite{guidotti2018survey,koh2017understanding}. For natural language processing, the interpretability of a method is essential. From another angle, how to use prior knowledge to improve end-to-end learning is also a direction that has attracted wide attention\upcite{arthur2016incorporating,Zhang2017PriorKI}, for example, how to use syntactic knowledge to improve natural language processing models\upcite{zollmann2006syntax,charniak2003syntax,stahlberg2016syntactically}.
\item End-to-end learning is one of the defining features of neural network approaches. With it, system developers no longer need to design hidden structures for the input and output, and even feature engineering becomes unnecessary. On the other hand, since this kind of end-to-end learning is carried out entirely by the neural network itself, the whole learning process receives no guidance from human prior knowledge, which makes the learned structures and parameters very hard to interpret. To address this problem, many researchers have studied {\small\sffamily\bfseries{Explainable Machine Learning}}\index{Explainable Machine Learning}\upcite{DBLP:journals/corr/abs-1905-09418,moraffah2020causal,blodgett2020language}. For natural language processing, the interpretability of a method is essential. From another angle, how to use prior knowledge to improve end-to-end learning is also a direction that has attracted wide attention\upcite{arthur2016incorporating,zhang-etal-2017-prior,yang2017improving}, for example, how to use syntactic knowledge to improve natural language processing models\upcite{stahlberg2016syntactically,currey2019incorporating,currey2018multi,marevcek2018extracting,blevins2018deep}.
\vspace{0.5em}
\item To further improve the performance of neural language models, besides improving the model itself, one can also introduce new structures or other useful information into the model, and there is a good deal of notable work in this area. For example, word features beyond word embeddings can be introduced into a neural language model, such as linguistic features (morphological, syntactic, and semantic features)\upcite{Wu2012FactoredLM,Adel2015SyntacticAS}, contextual information\upcite{mikolov2012context,Wang2015LargerContextLM}, and external knowledge such as knowledge graphs\upcite{Ahn2016ANK}; character-level information can also be introduced, fed into the model either as standalone character features\upcite{Kim2016CharacterAwareNL,Hwang2017CharacterlevelLM} or together with word features\upcite{Onoe2016GatedWR,Verwimp2017CharacterWordLL} (a small sketch of the latter follows this list); introducing bidirectional models into neural language models is another very effective attempt, allowing word prediction to exploit textual information from both the past and the future\upcite{Graves2013HybridSR,bahdanau2014neural,Peters2018DeepCW}; and introducing attention mechanisms can markedly improve model performance, as briefly discussed in Section 1.5.2; besides the Transformer model, GPT\upcite{radford2018improving} and BERT\upcite{devlin2019bert} are also excellent works.
\vspace{0.5em}
\item Word embedding is one of the important advances in natural language processing in recent years. ``Embedding'' denotes a class of methods: in principle, any process that produces a distributed representation of an object can be regarded as ``embedding'' in the broad sense. Representation learning based on this idea has also become a frontier approach in natural language processing. For example, how to compute distributed representations of tree structures, or even graph structures\upcite{plank2013embedding,perozzi2014deepwalk}, has become an important way to analyze natural language. In addition, besides language modeling, there are many other ways to learn word embeddings, e.g., SENNA\upcite{collobert2011natural}, word2vec\upcite{DBLP:journals/corr/abs-1301-3781,mikolov2013distributed}, GloVe\upcite{DBLP:conf/emnlp/PenningtonSM14}, and CoVe\upcite{mccann2017learned}.
\item Word embedding is one of the important advances in natural language processing in recent years. ``Embedding'' denotes a class of methods: in principle, any process that produces a distributed representation of an object can be regarded as ``embedding'' in the broad sense. Representation learning based on this idea has also become a frontier approach in natural language processing. For example, how to compute distributed representations of tree structures, or even graph structures\upcite{DBLP:journals/corr/abs-1809-01854,Yin2018StructVAETL,Aharoni2017TowardsSN}, has become an important way to analyze natural language. In addition, besides language modeling, there are many other ways to learn word embeddings, e.g., SENNA\upcite{collobert2011natural}, word2vec\upcite{DBLP:journals/corr/abs-1301-3781,mikolov2013distributed}, GloVe\upcite{DBLP:conf/emnlp/PenningtonSM14}, and CoVe\upcite{mccann2017learned} (a minimal word2vec sketch also follows this list).
\vspace{0.5em}
\end{itemize}
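As a concrete illustration of the second item above, here is a minimal sketch of an input layer that feeds character-level information into a language model together with word features: a character CNN is max-pooled into a fixed-size feature vector and concatenated with the word embedding. This is our own toy example, not code from the book or the cited papers; PyTorch is assumed, and the class name CharWordInput and all dimensions are illustrative.

import torch
import torch.nn as nn

class CharWordInput(nn.Module):
    """Concatenate a word embedding with a pooled character-CNN feature."""
    def __init__(self, word_vocab, char_vocab, word_dim=64, char_dim=16, char_filters=32):
        super().__init__()
        self.word_emb = nn.Embedding(word_vocab, word_dim)
        self.char_emb = nn.Embedding(char_vocab, char_dim)
        # 1-D convolution over each word's character sequence.
        self.char_cnn = nn.Conv1d(char_dim, char_filters, kernel_size=3, padding=1)

    def forward(self, word_ids, char_ids):
        # word_ids: (batch, seq); char_ids: (batch, seq, max_word_len)
        w = self.word_emb(word_ids)                              # (batch, seq, word_dim)
        b, s, l = char_ids.shape
        c = self.char_emb(char_ids.view(b * s, l))               # (b*s, len, char_dim)
        c = self.char_cnn(c.transpose(1, 2)).max(dim=2).values   # (b*s, char_filters)
        c = c.view(b, s, -1)                                     # (batch, seq, char_filters)
        return torch.cat([w, c], dim=-1)  # joint input vector for the language model

inputs = CharWordInput(word_vocab=1000, char_vocab=80)
x = inputs(torch.randint(0, 1000, (2, 5)), torch.randint(0, 80, (2, 5, 12)))
print(x.shape)  # torch.Size([2, 5, 96]): 64 word dims + 32 character dims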
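Similarly, for the word-embedding item, the sketch below learns skip-gram word2vec embeddings outside of a full language-modeling setup and then queries them. It assumes gensim 4.x; the toy corpus and all hyperparameter values are illustrative assumptions, not values from the book.

from gensim.models import Word2Vec

# Tiny corpus, repeated so the model sees enough training examples.
corpus = [
    ["machine", "translation", "maps", "source", "text", "to", "target", "text"],
    ["neural", "models", "learn", "distributed", "representations", "of", "words"],
    ["word", "embeddings", "place", "similar", "words", "close", "together"],
] * 50

# sg=1 selects the skip-gram objective (vs. CBOW).
model = Word2Vec(corpus, vector_size=32, window=3, min_count=1, sg=1, epochs=20, seed=1)

print(model.wv["translation"].shape)         # (32,): a dense word vector
print(model.wv.similarity("words", "word"))  # cosine similarity of two embeddings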
@@ -4020,33 +4020,6 @@ year = {2012}
year = {2013},
}
@article{guidotti2018survey,
author = {Riccardo Guidotti and
Anna Monreale and
Salvatore Ruggieri and
Franco Turini and
Fosca Giannotti and
Dino Pedreschi},
title = {A Survey of Methods for Explaining Black Box Models},
journal = {ACM Computing Surveys},
volume = {51},
number = {5},
pages = {93:1--93:42},
year = {2019},
}
@inproceedings{koh2017understanding,
author = {Pang Wei Koh and
Percy Liang},
title = {Understanding Black-box Predictions via Influence Functions},
booktitle = {Proceedings of the 34th International Conference on Machine Learning,
{ICML} 2017, Sydney, NSW, Australia, 6-11 August 2017},
volume = {70},
pages = {1885--1894},
publisher = {{PMLR}},
year = {2017},
}
@inproceedings{arthur2016incorporating,
author = {Philip Arthur and
Graham Neubig and
@@ -4060,13 +4033,6 @@ year = {2012}
year = {2016},
}
@inproceedings{charniak2003syntax,
author = {Eugene Charniak and Kevin Knight and Kenji Yamada},
title = {Syntax-based Language Models for Statistical Machine Translation},
booktitle = {MT Summit IX},
year = {2003}
}
@inproceedings{stahlberg2016syntactically,
author = {Felix Stahlberg and
Eva Hasler and
@@ -4234,6 +4200,100 @@ year = {2012}
year={2012}
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%% Further-reading revisions and additions, pending review %%%%%%%%%%%%%%%%%%%
@article{moraffah2020causal,
title={Causal Interpretability for Machine Learning - Problems, Methods and Evaluation},
author={Raha Moraffah and
Mansooreh Karami and
Ruocheng Guo and
Adrienne Raglin and
Huan Liu},
journal={ACM SIGKDD Explorations Newsletter},
volume={22},
number={1},
pages={18--33},
year={2020},
publisher={ACM New York, NY, USA}
}
@article{blodgett2020language,
title={Language (Technology) is Power: A Critical Survey of ``Bias'' in NLP},
author={Su Lin Blodgett and
Solon Barocas and
Hal Daum{\'e} III and
Hanna M. Wallach},
journal={arXiv preprint arXiv:2005.14050},
year={2020}
}
@incollection{nguyen2019understanding,
title={Understanding Neural Networks via Feature Visualization: A Survey},
author={Anh Nguyen and
Jason Yosinski and
Jeff Clune},
pages={55--76},
year={2019},
publisher={Explainable AI: Interpreting, Explaining and Visualizing Deep Learning}
}
@inproceedings{yang2017improving,
title={Improving Adversarial Neural Machine Translation with Prior Knowledge},
author={Yating Yang and
Xiao Li and
Tonghai Jiang and
Jinying Kong and
Bo Ma and
Xi Zhou and
Lei Wang},
publisher={IEEE Global Conference on Signal and Information Processing},
pages={1373--1377},
year={2017}
}
@inproceedings{currey2019incorporating,
title={Incorporating Source Syntax into Transformer-Based Neural Machine Translation},
author={Anna Currey and
Kenneth Heafield},
publisher={Proceedings of the Fourth Conference on Machine Translation},
pages={24--33},
year={2019}
}
@article{currey2018multi,
title={Multi-Source Syntactic Neural Machine Translation},
author={Anna Currey and
Kenneth Heafield},
journal={arXiv preprint arXiv:1808.10267},
year={2018}
}
@inproceedings{marevcek2018extracting,
title={Extracting Syntactic Trees from Transformer Encoder Self-Attentions},
author={David Mare{\v{c}}ek and
Rudolf Rosa},
publisher={Conference on Empirical Methods in Natural Language Processing},
pages={347--349},
year={2018}
}
@article{blevins2018deep,
title={Deep {RNNs} Encode Soft Hierarchical Syntax},
author={Terra Blevins and
Omer Levy and
Luke Zettlemoyer},
journal={arXiv preprint arXiv:1805.04218},
year={2018}
}
@inproceedings{Yin2018StructVAETL,
title={StructVAE: Tree-structured Latent Variable Models for Semi-supervised Semantic Parsing},
author={Pengcheng Yin and
Chunting Zhou and
Junxian He and
Graham Neubig},
publisher={Annual Meeting of the Association for Computational Linguistics},
year={2018}
}
@article{Aharoni2017TowardsSN,
title={Towards String-To-Tree Neural Machine Translation},
author={Roee Aharoni and
Yoav Goldberg},
journal={arXiv preprint arXiv:1704.04743},
year={2017}
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%% chapter 9------------------------------------------------------
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -4870,17 +4930,15 @@ pages ={157-166},
year={2016}
}
@inproceedings{zhang-etal-2017-prior,
    title = {Prior Knowledge Integration for Neural Machine Translation using Posterior Regularization},
    author = {Jiacheng Zhang and
    Yang Liu and
    Huanbo Luan and
    Jingfang Xu and
    Maosong Sun},
    year = {2017},
    publisher = {Association for Computational Linguistics},
    pages = {1514--1523},
}
@inproceedings{duan-etal-2020-bilingual,
......