train-subset: train
valid-subset: valid

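# Training stops at whichever limit is reached first: 50 epochs or 100k updates.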
max-epoch: 50
max-update: 100000

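# Data loading and bookkeeping: 8 dataloader workers, early stopping after 10
# consecutive validation runs without improvement, logging every 100 updates.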
num-workers: 8
patience: 10
no-progress-bar: True
log-interval: 100
seed: 1
report-accuracy: True
skip-invalid-size-inputs-valid-test: True

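# Optional warm start from pretrained encoder/decoder checkpoints (left disabled).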
#load-pretrained-encoder-from:
#load-pretrained-decoder-from:

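# Model and optimizer: base transformer arch with tied decoder input/output
# embeddings, trained with Adam under an inverse-sqrt schedule (linear warmup
# from 1e-7 to 1e-3 over the first 8000 updates, then decay ~ 1/sqrt(update)).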
arch: transformer
share-decoder-input-output-embed: True
optimizer: adam
clip-norm: 10.0
lr-scheduler: inverse_sqrt
warmup-init-lr: 1e-7
warmup-updates: 8000
lr: 1e-3
adam-betas: (0.9,0.997)

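# Loss: label-smoothed cross-entropy; roughly 0.1 of the target probability
# mass is spread uniformly over the rest of the vocabulary.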
criterion: label_smoothed_cross_entropy
label-smoothing: 0.1

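# Regularization: 0.1 dropout on sub-layer outputs, attention weights,
# and FFN activations.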
dropout: 0.1
attention-dropout: 0.1
activation-dropout: 0.1

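# Architecture overrides on top of the transformer defaults: pre-norm
# (LayerNorm before each sub-layer), 6 encoder + 6 decoder layers,
# 256-dim embeddings, 2048-dim FFN, 4 attention heads per layer.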
activation-fn: relu
encoder-normalize-before: True
decoder-normalize-before: True
encoder-embed-dim: 256
encoder-ffn-embed-dim: 2048
encoder-layers: 6
decoder-layers: 6
encoder-attention-heads: 4

decoder-embed-dim: 256
decoder-ffn-embed-dim: 2048
decoder-attention-heads: 4
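
# Usage sketch (assumption: this YAML is flattened into CLI flags for
# fairseq-train; "data-bin/my_dataset" is a hypothetical preprocessed data dir):
#
#   fairseq-train data-bin/my_dataset \
#       --arch transformer --share-decoder-input-output-embed \
#       --optimizer adam --adam-betas '(0.9,0.997)' --clip-norm 10.0 \
#       --lr 1e-3 --lr-scheduler inverse_sqrt \
#       --warmup-init-lr 1e-7 --warmup-updates 8000 \
#       --criterion label_smoothed_cross_entropy --label-smoothing 0.1 \
#       --max-epoch 50 --max-update 100000 --patience 10 \
#       --encoder-embed-dim 256 --decoder-embed-dim 256 ...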