Commit 7cb7e508 by xuchen

optimize the shell script

parent be9c1ab4
- train-subset: train_st
+ train-subset: train-clean-100,train-clean-360,train-other-500
- valid-subset: dev_st
+ valid-subset: dev-clean
- max-epoch: 50
+ max-epoch: 100
- max-update: 100000
+ max-update: 300000
num-workers: 8
patience: 10
......
arch: pys2t_transformer_s
encoder-embed-dim: 512
pyramid-stages: 4
pyramid-layers: 2_3_5_2
pyramid-sr-ratios: 2_2_2_2
pyramid-embed-dims: 64_128_256_512
pyramid-reduced-embed: conv
pyramid-embed-norm: True
pyramid-position-embed: 1_0_0_0
pyramid-kernel-sizes: 5_5_5_5
pyramid-ffn-ratios: 4_4_4_4
pyramid-heads: 1_2_4_8
train-subset: train_asr
valid-subset: dev_asr
max-epoch: 50
max-update: 100000
num-workers: 8
patience: 10
no-progress-bar: True
log-interval: 100
seed: 1
report-accuracy: True
#load-pretrained-encoder-from:
#load-pretrained-decoder-from:
share-decoder-input-output-embed: True
optimizer: adam
clip-norm: 10.0
lr-scheduler: inverse_sqrt
warmup-init-lr: 1e-7
warmup-updates: 10000
lr: 2e-3
#adam_betas: (0.9,0.98)
criterion: label_smoothed_cross_entropy
label_smoothing: 0.1
conv-channels: 1024
dropout: 0.1
activation-fn: relu
encoder-ffn-embed-dim: 2048
encoder-layers: 12
decoder-layers: 6
encoder-attention-heads: 4
decoder-embed-dim: 256
decoder-ffn-embed-dim: 2048
decoder-attention-heads: 4
attention-dropout: 0.1
activation-dropout: 0.1
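
The pyramid-* options above give one value per encoder stage as an underscore-separated string (e.g. pyramid-layers: 2_3_5_2 together with pyramid-stages: 4). A minimal sketch of how such fields can be split and checked against pyramid-stages; the helper name parse_per_stage is hypothetical and not taken from this repository:

def parse_per_stage(value: str, num_stages: int, name: str) -> list:
    # Split an underscore-separated option into per-stage integers and verify
    # that the number of entries matches the declared pyramid-stages.
    parts = [int(v) for v in value.split("_")]
    if len(parts) != num_stages:
        raise ValueError(f"{name}: expected {num_stages} values, got {len(parts)}")
    return parts

num_stages = 4
layers = parse_per_stage("2_3_5_2", num_stages, "pyramid-layers")        # [2, 3, 5, 2]
embed_dims = parse_per_stage("64_128_256_512", num_stages, "pyramid-embed-dims")
heads = parse_per_stage("1_2_4_8", num_stages, "pyramid-heads")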
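
The optimizer settings combine adam with fairseq's inverse_sqrt scheduler: the learning rate is warmed up linearly from warmup-init-lr (1e-7) to lr (2e-3) over warmup-updates (10000) steps and then decays with the inverse square root of the update number. A small sketch of that behaviour, assuming the standard fairseq formulation:

def inverse_sqrt_lr(num_updates, lr=2e-3, warmup_updates=10000, warmup_init_lr=1e-7):
    # Linear warm-up, then lr * sqrt(warmup_updates) / sqrt(num_updates).
    if num_updates < warmup_updates:
        return warmup_init_lr + (lr - warmup_init_lr) * num_updates / warmup_updates
    return lr * (warmup_updates ** 0.5) * (num_updates ** -0.5)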
- train-subset: train_st
- valid-subset: dev_st
+ arch: pys2t_transformer_s
+ encoder-embed-dim: 512
+ pyramid-stages: 3
+ pyramid-layers: 3_6_3
+ pyramid-sr-ratios: 2_2_2
+ pyramid-embed-dims: 128_256_512
+ pyramid-reduced-embed: conv
+ pyramid-embed-norm: True
+ pyramid-position-embed: 1_0_0
+ pyramid-kernel-sizes: 5_5_5
+ pyramid-ffn-ratios: 8_8_4
+ pyramid-heads: 2_4_8
+ train-subset: train_asr
+ valid-subset: dev_asr
max-epoch: 50
max-update: 100000
@@ -14,7 +27,6 @@ report-accuracy: True
#load-pretrained-encoder-from:
#load-pretrained-decoder-from:
- arch: s2t_transformer_s
share-decoder-input-output-embed: True
optimizer: adam
clip-norm: 10.0
@@ -24,25 +36,17 @@ warmup-updates: 10000
lr: 2e-3
#adam_betas: (0.9,0.98)
- ctc-weight: 0.3
- criterion: label_smoothed_cross_entropy_with_ctc
+ criterion: label_smoothed_cross_entropy
label_smoothing: 0.1
- conv-kernel-sizes: 5,5
conv-channels: 1024
dropout: 0.1
activation-fn: relu
- encoder-embed-dim: 256
encoder-ffn-embed-dim: 2048
encoder-layers: 12
decoder-layers: 6
encoder-attention-heads: 4
- encoder-attention-type: relative
- decoder-attention-type: relative
- max-encoder-relative-length: 100
- max-decoder-relative-length: 20
decoder-embed-dim: 256
decoder-ffn-embed-dim: 2048
decoder-attention-heads: 4
......
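
The hunk above removes the joint objective (ctc-weight: 0.3 with label_smoothed_cross_entropy_with_ctc) in favour of plain label-smoothed cross-entropy. As an illustration of what such a weighted combination typically computes, not the repository's actual criterion code:

def label_smoothed_nll(lprobs, target, eps=0.1):
    # lprobs: (tokens, vocab) log-probabilities; target: (tokens,) gold indices.
    nll = -lprobs.gather(dim=-1, index=target.unsqueeze(-1)).squeeze(-1)
    smooth = -lprobs.mean(dim=-1)  # spread eps of the probability mass uniformly
    return ((1.0 - eps) * nll + eps * smooth).sum()

def joint_loss(ce_loss, ctc_loss, ctc_weight=0.3):
    # ctc-weight interpolates the attention-based loss with the auxiliary CTC loss.
    return (1.0 - ctc_weight) * ce_loss + ctc_weight * ctc_loss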
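
The same hunk also drops the relative-attention options; max-encoder-relative-length: 100 and max-decoder-relative-length: 20 bound how far apart two positions may be when a relative-position embedding is looked up. A sketch of the usual clipping step in Shaw-style relative position attention; the function name is illustrative:

def clip_relative_position(query_pos, key_pos, max_relative_length):
    # Distances beyond the window are clipped so a single shared embedding
    # covers every "far away" offset.
    distance = key_pos - query_pos
    return max(-max_relative_length, min(max_relative_length, distance))

# e.g. with max-encoder-relative-length: 100
clip_relative_position(0, 250, 100)    # -> 100
clip_relative_position(250, 0, 100)    # -> -100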
+ arch: pys2t_transformer_s
+ #conv-kernel-sizes: 5
+ encoder-embed-dim: 512
+ pyramid-layers: 3_6_9
+ ctc-layer: 7
train-subset: train_st
valid-subset: dev_st
@@ -14,7 +20,6 @@ report-accuracy: True
#load-pretrained-encoder-from:
#load-pretrained-decoder-from:
- arch: s2t_conformer_s
share-decoder-input-output-embed: True
optimizer: adam
clip-norm: 10.0
@@ -28,20 +33,14 @@ ctc-weight: 0.3
criterion: label_smoothed_cross_entropy_with_ctc
label_smoothing: 0.1
- conv-kernel-sizes: 5,5
conv-channels: 1024
dropout: 0.1
activation-fn: relu
- encoder-embed-dim: 256
encoder-ffn-embed-dim: 2048
encoder-layers: 12
decoder-layers: 6
encoder-attention-heads: 4
- macaron-style: True
- use-cnn-module: True
- cnn-module-kernel: 31
decoder-embed-dim: 256
decoder-ffn-embed-dim: 2048
decoder-attention-heads: 4
......
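
In the last file the pyramid encoder keeps its CTC branch but taps it at an intermediate depth (ctc-layer: 7 alongside pyramid-layers: 3_6_9). A rough sketch of attaching the CTC projection to an intermediate encoder layer; the class and attribute names are hypothetical, not taken from this repository:

import torch.nn as nn

class EncoderWithIntermediateCTC(nn.Module):
    def __init__(self, layers, embed_dim, vocab_size, ctc_layer=7):
        super().__init__()
        self.layers = nn.ModuleList(layers)   # stacked encoder layers
        self.ctc_layer = ctc_layer            # 1-based index of the tapped layer
        self.ctc_proj = nn.Linear(embed_dim, vocab_size)

    def forward(self, x):
        ctc_logits = None
        for i, layer in enumerate(self.layers, start=1):
            x = layer(x)
            if i == self.ctc_layer:
                # the CTC loss is computed on this intermediate representation
                ctc_logits = self.ctc_proj(x)
        return x, ctc_logits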