# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""

from .adaptive_input import AdaptiveInput
from .adaptive_softmax import AdaptiveSoftmax
from .beamable_mm import BeamableMM
from .character_token_embedder import CharacterTokenEmbedder
from .convolution import ConvolutionModule
from .downsample_convolution import DownSampleConvolutionModule
from .conv_tbc import ConvTBC
from .cross_entropy import cross_entropy
from .downsampled_multihead_attention import DownsampledMultiHeadAttention
from .dynamic_convolution import DynamicConv, DynamicConv1dTBC
from .dynamic_crf_layer import DynamicCRF
from .fairseq_dropout import FairseqDropout
from .fp32_group_norm import Fp32GroupNorm
from .gelu import gelu, gelu_accurate
from .grad_multiply import GradMultiply
from .gumbel_vector_quantizer import GumbelVectorQuantizer
from .kmeans_vector_quantizer import KmeansVectorQuantizer
from .layer_drop import LayerDropModuleList
from .layer_history import CreateLayerHistory, LearnableDenseLayerHistory
from .layer_norm import Fp32LayerNorm, LayerNorm
from .learned_positional_embedding import LearnedPositionalEmbedding
from .lightweight_convolution import LightweightConv, LightweightConv1dTBC
from .linearized_convolution import LinearizedConvolution
from .local_multihead_attention import LocalMultiheadAttention
from .multihead_attention import MultiheadAttention
from .positional_embedding import PositionalEmbedding
from .reduced_multihead_attention import ReducedMultiheadAttention
from .rel_position_multihead_attention import RelPositionMultiheadAttention
from .relative_multihead_attention import RelativeMultiheadAttention
from .same_pad import SamePad
from .scalar_bias import ScalarBias
from .sinusoidal_positional_embedding import SinusoidalPositionalEmbedding
from .transformer_sentence_encoder_layer import TransformerSentenceEncoderLayer
from .transformer_sentence_encoder import TransformerSentenceEncoder
from .transpose_last import TransposeLast
from .unfold import unfold1d
from .transformer_layer import TransformerDecoderLayer, TransformerEncoderLayer
from .vggblock import VGGBlock
from .conformer_layer import ConformerEncoderLayer
from .pds_layer import PDSTransformerEncoderLayer

__all__ = [
    "AdaptiveInput",
    "AdaptiveSoftmax",
    "BeamableMM",
    "CharacterTokenEmbedder",
    "ConformerEncoderLayer",
    "ConvolutionModule",
    "ConvTBC",
    "CreateLayerHistory",
    "cross_entropy",
    "DownSampleConvolutionModule",
    "DownsampledMultiHeadAttention",
    "DynamicConv1dTBC",
    "DynamicConv",
    "DynamicCRF",
    "FairseqDropout",
    "Fp32GroupNorm",
    "Fp32LayerNorm",
    "gelu",
    "gelu_accurate",
    "GradMultiply",
    "GumbelVectorQuantizer",
    "KmeansVectorQuantizer",
    "LayerDropModuleList",
    "LayerNorm",
    "LearnableDenseLayerHistory",
    "LearnedPositionalEmbedding",
    "LightweightConv1dTBC",
    "LightweightConv",
    "LinearizedConvolution",
    "LocalMultiheadAttention",
    "MultiheadAttention",
    "PositionalEmbedding",
    "PDSTransformerEncoderLayer",
    "ReducedMultiheadAttention",
    "RelPositionMultiheadAttention",
    "RelativeMultiheadAttention",
    "SamePad",
    "ScalarBias",
    "SinusoidalPositionalEmbedding",
    "TransformerSentenceEncoderLayer",
    "TransformerSentenceEncoder",
    "TransformerDecoderLayer",
    "TransformerEncoderLayer",
    "TransposeLast",
    "VGGBlock",
    "unfold1d",
]
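
# Usage sketch (illustration only, assuming this package is importable as
# ``fairseq.modules`` as in upstream fairseq; the constructor arguments
# shown follow the upstream signatures):
#
#     from fairseq.modules import LayerNorm, MultiheadAttention
#
#     # build a self-attention block and its pre-norm layer
#     self_attn = MultiheadAttention(embed_dim=512, num_heads=8, dropout=0.1)
#     norm = LayerNorm(512)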