/* NiuTrans.NMT - an open-source neural machine translation system.
 * Copyright (C) 2020 NiuTrans Research. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * $Created by: XIAO Tong (xiaotong@mail.neu.edu.cn) 2018-07-31
 * $Modified by: HU Chi (huchinlp@gmail.com) 2020-04
 */

#ifndef __ENCODER_H__
#define __ENCODER_H__

#include "Utility.h"
#include "submodel/FNN.h"
#include "submodel/Attention.h"
#include "submodel/Embedding.h"
#include "submodel/LayerNorm.h"
#include "../../network/XNet.h"

using namespace nts;

namespace nmt
{

/*
base class of the encoder
*/
class Encoder
{
public:
    /* make the encoding network */
    virtual XTensor Make(XTensor& input, XTensor* mask, XTensor& maskEncDec, bool isTraining) = 0;

    /* virtual destructor, so that derived encoders can be destroyed
       safely through a base-class pointer */
    virtual ~Encoder() {}
};

/*
the encoder based on self-attention
*/
class AttEncoder : public Encoder
{
public:
    /* device id */
    int devID;

    /* layer number */
    int nlayer;

    /* hidden layer size of the FNN layer */
    int hSize;

    /* embedding size */
    int eSize;

    /* vocabulary size */
    int vSize;

    /* dropout probability */
    DTYPE dropoutP;

    /* the number of positions ignored in attention. This is useful in
       language modeling, where the first position needs a special design
       for the attention model. */
    int ignored;
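    /* e.g., ignored = 1 means the first position is excluded from
       attention (an illustration of the intent only; the actual masks
       are built outside this class) */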

    /* embedding of the word at each position */
    Embedder embedder;

    /* FNN model of each layer */
    FNN* fnns;

    /* attention model of each layer */
    Attention* selfAtt;

    /* layer normalizations for attention */
    LN* attLayerNorms;

    /* layer normalizations for FNN */
    LN* fnnLayerNorms;

    /* layer normalization for the encoder output */
    LN* encoderLayerNorm;

    /* indicates whether layer normalization is applied before each
       sublayer (pre-norm, true) or after it (post-norm, false) */
    bool preNorm;
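    /* As a sketch of the two variants: pre-norm computes
       x + Sublayer(LN(x)) for each sublayer, while post-norm computes
       LN(x + Sublayer(x)). */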

public:
    /* constructor */
    AttEncoder();

    /* destructor */
    ~AttEncoder();

    /* initialize the model */
    void InitModel(Config& config);

    /* make the encoding network */
    XTensor Make(XTensor& input, XTensor* mask, XTensor& maskEncDec, bool isTraining);

    /* make the encoding network (a faster implementation) */
    XTensor MakeFast(XTensor& input, XTensor* mask, XTensor& maskEncDec, bool isTraining);

    /* make the encoding network (wrapper) */
    XTensor Make(XTensor& input, XTensor* mask, bool isTraining);
};
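
/*
A minimal usage sketch (illustrative only: "config" is assumed to be a
populated Config, and "input" a batch of token ids on the same device;
the wrapper Make is called here without an explicit attention mask):

    AttEncoder encoder;
    encoder.InitModel(config);

    XTensor encoding = encoder.Make(input, NULL, false);
*/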

}

#endif