Commit ea1870e5 by xuchen

modify the shell scripts

parent 64a9e37e
......@@ -12,7 +12,7 @@ log-interval: 100
seed: 1
report-accuracy: True
arch: s2t_conformer_s
arch: s2t_transformer_s
share-decoder-input-output-embed: True
optimizer: adam
clip-norm: 10.0
......
......@@ -12,7 +12,7 @@ extra_parameter=
#extra_tag="${extra_tag}"
#extra_parameter="${extra_parameter} "
exp_tag=baseline_997
exp_tag=baseline
train_config=train.yaml
cmd="./run.sh
......
......@@ -93,8 +93,8 @@ if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
### Task dependent. You have to make data the following preparation part by yourself.
### But you can utilize Kaldi recipes in most cases
echo "stage 0: ASR Data Preparation"
if [[ ! -e ${data_dir} ]]; then
mkdir -p ${data_dir}
if [[ ! -e ${data_dir}/${lang} ]]; then
mkdir -p ${data_dir}/${lang}
fi
cmd="python ${root_dir}/examples/speech_to_text/prep_mustc_data.py
......
train-subset: train
valid-subset: dev
valid-subset: valid
max-epoch: 50
max-update: 100000
......
......@@ -2,7 +2,7 @@
gpu_num=1
test_subset=(tst-COMMON)
test_subset=(test)
exp_name=
if [ "$#" -eq 1 ]; then
......
......@@ -43,7 +43,7 @@ data_dir=~/st/data/${dataset}/mt/${lang}
train_prefix=train
valid_prefix=dev
test_prefix=tst-COMMON
test_subset=(tst-COMMON)
test_subset=(test)
# exp
extra_tag=
......@@ -148,9 +148,9 @@ if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
cmd="python ${root_dir}/fairseq_cli/preprocess.py
--source-lang ${src_lang} --target-lang ${tgt_lang}
--trainpref ${data_dir}/data/train/${train_prefix}
--validpref ${data_dir}/data/dev/${valid_prefix}
--testpref ${data_dir}/data/test/${test_prefix}
--trainpref ${data_dir}/data/${train_prefix}
--validpref ${data_dir}/data/${valid_prefix}
--testpref ${data_dir}/data/${test_prefix}
--destdir ${data_dir}/data-bin
--srcdict ${data_dir}/${src_vocab_prefix}.txt
--tgtdict ${data_dir}/${tgt_vocab_prefix}.txt
......
......@@ -33,7 +33,7 @@ dropout: 0.1
activation-fn: relu
encoder-embed-dim: 256
encoder-ffn-embed-dim: 2048
encoder-layers: 6
encoder-layers: 12
decoder-layers: 6
encoder-attention-heads: 4
......
......@@ -34,7 +34,7 @@ dropout: 0.1
activation-fn: relu
encoder-embed-dim: 256
encoder-ffn-embed-dim: 2048
encoder-layers: 6
encoder-layers: 12
decoder-layers: 6
encoder-attention-heads: 4
......
......@@ -34,7 +34,7 @@ dropout: 0.1
activation-fn: relu
encoder-embed-dim: 256
encoder-ffn-embed-dim: 2048
encoder-layers: 6
encoder-layers: 12
decoder-layers: 6
encoder-attention-heads: 4
......
......@@ -34,7 +34,7 @@ dropout: 0.1
activation-fn: relu
encoder-embed-dim: 256
encoder-ffn-embed-dim: 2048
encoder-layers: 6
encoder-layers: 12
decoder-layers: 6
encoder-attention-heads: 4
encoder-attention-type: rel_selfattn
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论