Commit 9ac7a1aa by xuchen

update the shell scripts

parent f9fa133d
......@@ -34,7 +34,7 @@ dropout: 0.1
activation-fn: relu
encoder-embed-dim: 256
encoder-ffn-embed-dim: 2048
-encoder-layers: 12
+encoder-layers: 6
decoder-layers: 6
encoder-attention-heads: 4
......
......@@ -24,7 +24,7 @@ device=()
gpu_num=8
update_freq=1
-root_dir=~/st/fairseq
+root_dir=~/st/Fairseq-S2T
pwd_dir=$PWD
# dataset
......
......@@ -3,8 +3,8 @@
# training the model
gpu_num=8
-update_freq=1
-max_tokens=40000
+update_freq=2
+max_tokens=20000
extra_tag=
extra_parameter=
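For context: the effective batch is roughly max_tokens × gpu_num × update_freq, so halving max_tokens while doubling update_freq keeps the tokens seen per optimizer step about the same. A minimal sketch of that relationship, reusing the variable names from the script above:

# tokens consumed per optimizer step ~= max_tokens * gpu_num * update_freq
gpu_num=8
update_freq=2
max_tokens=20000
echo "approx. tokens per update: $((max_tokens * gpu_num * update_freq))"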
......@@ -13,7 +13,7 @@ extra_parameter=
#extra_parameter="${extra_parameter} "
exp_tag=
-train_config=asr_train_ctc.yaml
+train_config=train_ctc.yaml
cmd="./run.sh
--stage 1
......
......@@ -21,7 +21,7 @@ clip-norm: 10.0
lr-scheduler: inverse_sqrt
warmup-init-lr: 1e-7
warmup-updates: 8000
-lr: 1e-3
+lr: 5e-4
adam_betas: (0.9,0.98)
criterion: label_smoothed_cross_entropy
......
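For reference, with fairseq's inverse_sqrt scheduler the rate warms up linearly from warmup-init-lr to the peak lr over warmup-updates steps and then decays with the inverse square root of the update number, so this change lowers the peak from 1e-3 to 5e-4. A small sketch of the post-warmup decay under that assumed form, using the values from the config above:

# assumed post-warmup schedule: lr(step) = peak_lr * sqrt(warmup_updates / step)
awk 'BEGIN {
    peak = 5e-4; warmup = 8000;
    for (i = 0; i < 4; i++) {
        step = warmup * 2 ^ i;
        printf "step %6d  lr = %.2e\n", step, peak * sqrt(warmup / step);
    }
}'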
......@@ -24,7 +24,7 @@ device=()
gpu_num=8
update_freq=1
-root_dir=~/st/fairseq
+root_dir=~/st/Fairseq-S2T
pwd_dir=$PWD
# dataset
......@@ -53,7 +53,7 @@ train_config=st_train_ctc.yaml
# training setting
fp16=1
-max_tokens=40000
+max_tokens=4096
step_valid=0
bleu_valid=0
......
......@@ -12,7 +12,7 @@ extra_parameter=
#extra_tag="${extra_tag}"
#extra_parameter="${extra_parameter} "
-exp_tag=
+exp_tag=baseline
train_config=train.yaml
cmd="./run.sh
......
......@@ -33,7 +33,7 @@ dropout: 0.1
activation-fn: relu
encoder-embed-dim: 256
encoder-ffn-embed-dim: 2048
-encoder-layers: 12
+encoder-layers: 6
decoder-layers: 6
encoder-attention-heads: 4
......
......@@ -34,7 +34,7 @@ dropout: 0.1
activation-fn: relu
encoder-embed-dim: 256
encoder-ffn-embed-dim: 2048
-encoder-layers: 12
+encoder-layers: 6
decoder-layers: 6
encoder-attention-heads: 4
......
train-subset: train_st
valid-subset: dev_st
-max-epoch: 100
+max-epoch: 50
max-update: 100000
num-workers: 8
......@@ -34,7 +34,7 @@ dropout: 0.1
activation-fn: relu
encoder-embed-dim: 256
encoder-ffn-embed-dim: 2048
-encoder-layers: 12
+encoder-layers: 6
decoder-layers: 6
encoder-attention-heads: 4
......
......@@ -24,7 +24,7 @@ device=()
gpu_num=8
update_freq=1
-root_dir=~/st/fairseq
+root_dir=~/st/Fairseq-S2T
pwd_dir=$PWD
# dataset
......@@ -101,8 +101,8 @@ if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
### Task dependent. You have to prepare the data in this stage by yourself.
### But you can reuse Kaldi recipes in most cases
echo "stage 0: ASR Data Preparation"
-if [[ ! -e ${data_dir} ]]; then
-    mkdir -p ${data_dir}
+if [[ ! -e ${data_dir}/${lang} ]]; then
+    mkdir -p ${data_dir}/${lang}
fi
cmd="python ${root_dir}/examples/speech_to_text/prep_mustc_data.py
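Since mkdir -p already succeeds silently when the target exists, the new per-language guard could equally be a single line; a minimal equivalent sketch, with example values assumed for data_dir and lang:

# assumed example values; in run.sh both are set earlier in the script
data_dir=./data
lang=de
# equivalent to the guarded block above: mkdir -p is a no-op if the
# per-language directory already exists
mkdir -p ${data_dir}/${lang}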
......@@ -166,7 +166,7 @@ if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
${data_dir}
--config-yaml ${data_config}
--train-config ${train_config}
---task speech_to_text
+--task ${task}
--max-tokens ${max_tokens}
--update-freq ${update_freq}
--log-interval 100
......
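The hard-coded task is now read from a variable, so the same launch script can drive both the ASR and ST configurations. A minimal sketch of the intended substitution, with the default value assumed rather than taken from the script:

# assumed default; run.sh can override ${task} per experiment
task=${task:-speech_to_text}
echo "training with --task ${task}"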