Skip to content

Commit b8a0948

Browse files
Merge branch 'perseus' into master
2 parents 9a3bc05 + 9346f04 commit b8a0948

File tree

51 files changed

+3010
-25
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

51 files changed

+3010
-25
lines changed
Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,20 @@
1+
--feats-audio-feat
2+
logfb
3+
--feats-sample-frequency
4+
8000
5+
--feats-frame-length
6+
25
7+
--feats-fb-type
8+
linear
9+
--feats-low-freq
10+
20
11+
--feats-high-freq
12+
3700
13+
--feats-num-filters
14+
64
15+
--feats-snip-edges
16+
false
17+
--feats-use-energy
18+
false
19+
--mvn-context
20+
150

egs/sre20-cts/v1/config_fbank64_stmn_effnetb4_v2_arcs30m0.3_trn_alllangs_nocv_nocnceleb_adam_lr0.01.amp.v1.sh

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -40,18 +40,18 @@ nnet=$nnet_dir/model_ep0060.pth
4040
ft_batch_size_1gpu=4
4141
ft_eff_batch_size=128 # effective batch size
4242
ft_min_chunk=10
43-
ft_max_chunk=60
43+
ft_max_chunk=10
4444
ft_ipe=1
4545
ft_lr=0.05
46-
ft_nnet_num_epochs=21
46+
ft_nnet_num_epochs=30
4747
ft_margin=0.3
4848
ft_margin_warmup=3
4949

5050
ft_opt_opt="--opt-optimizer sgd --opt-lr $ft_lr --opt-momentum 0.9 --opt-weight-decay 1e-5 --use-amp --var-batch-size"
5151
ft_lrs_opt="--lrsch-lrsch-type cos_lr --lrsch-t 2500 --lrsch-t-mul 2 --lrsch-warm-restarts --lrsch-gamma 0.75 --lrsch-min-lr 1e-4 --lrsch-warmup-steps 100 --lrsch-update-lr-on-opt-step"
5252
ft_nnet_name=${nnet_name}.ft_${ft_min_chunk}_${ft_max_chunk}_arcm${ft_margin}_sgdcos_lr${ft_lr}_b${ft_eff_batch_size}_amp.v2
5353
ft_nnet_dir=exp/xvector_nnets/$ft_nnet_name
54-
ft_nnet=$ft_nnet_dir/model_ep0020.pth
54+
ft_nnet=$ft_nnet_dir/model_ep0030.pth
5555

5656

5757
# xvector last-layer finetuning alllangs
Lines changed: 116 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,116 @@
1+
# Res2Net50 x-vector with mixed precision training
2+
3+
# acoustic features
4+
feat_config=conf/fbank64_mvn_8k.pyconf
5+
feat_type=fbank64_stmn
6+
7+
8+
# x-vector training
9+
nnet_data=alllangs_nocv_nocnceleb
10+
nnet_num_augs=4
11+
aug_opt="--train-aug-cfg conf/reverb_noise_aug.yml --val-aug-cfg conf/reverb_noise_aug.yml"
12+
13+
batch_size_1gpu=8
14+
eff_batch_size=512 # effective batch size
15+
ipe=$nnet_num_augs
16+
min_chunk=4
17+
max_chunk=4
18+
lr=0.01
19+
20+
nnet_type=res2net50
21+
dropout=0
22+
embed_dim=256
23+
width_factor=1.625
24+
scale=4
25+
ws_tag=w26s4
26+
27+
s=30
28+
margin_warmup=20
29+
margin=0.3
30+
31+
nnet_opt="--resnet-type $nnet_type --in-feats 64 --in-channels 1 --in-kernel-size 3 --in-stride 1 --no-maxpool --norm-layer instance-norm-affine --head-norm-layer layer-norm --no-maxpool --res2net-width-factor $width_factor --res2net-scale $scale"
32+
33+
opt_opt="--opt-optimizer adam --opt-lr $lr --opt-beta1 0.9 --opt-beta2 0.95 --opt-weight-decay 1e-5 --opt-amsgrad" # --use-amp"
34+
lrs_opt="--lrsch-lrsch-type exp_lr --lrsch-decay-rate 0.5 --lrsch-decay-steps 10000 --lrsch-hold-steps 40000 --lrsch-min-lr 1e-5 --lrsch-warmup-steps 1000 --lrsch-update-lr-on-opt-step"
35+
36+
nnet_name=${feat_type}_${nnet_type}${ws_tag}_eina_hln_e${embed_dim}_arcs${s}m${margin}_do${dropout}_adam_lr${lr}_b${eff_batch_size}_amp.v1.$nnet_data
37+
nnet_num_epochs=50
38+
nnet_dir=exp/xvector_nnets/$nnet_name
39+
nnet=$nnet_dir/model_ep0050.pth
40+
41+
42+
# xvector full net finetuning with out-of-domain
43+
ft_batch_size_1gpu=4
44+
ft_eff_batch_size=128 # effective batch size
45+
ft_min_chunk=10
46+
ft_max_chunk=20
47+
ft_ipe=1
48+
ft_lr=0.05
49+
ft_nnet_num_epochs=21
50+
ft_nnet_num_epochs=45
51+
ft_margin=0.3
52+
ft_margin_warmup=3
53+
54+
ft_opt_opt="--opt-optimizer sgd --opt-lr $ft_lr --opt-momentum 0.9 --opt-weight-decay 1e-5 --use-amp --var-batch-size"
55+
ft_lrs_opt="--lrsch-lrsch-type cos_lr --lrsch-t 2500 --lrsch-t-mul 2 --lrsch-warm-restarts --lrsch-gamma 0.75 --lrsch-min-lr 1e-4 --lrsch-warmup-steps 100 --lrsch-update-lr-on-opt-step"
56+
ft_nnet_name=${nnet_name}.ft_${ft_min_chunk}_${ft_max_chunk}_arcm${ft_margin}_sgdcos_lr${ft_lr}_b${ft_eff_batch_size}_amp.v2
57+
ft_nnet_dir=exp/xvector_nnets/$ft_nnet_name
58+
ft_nnet=$ft_nnet_dir/model_ep0014.pth
59+
60+
61+
# xvector last-layer finetuning realtel
62+
reg_layers_classif=0
63+
reg_layers_enc="0 1 2 3 4"
64+
nnet_adapt_data=realtel
65+
ft2_batch_size_1gpu=16
66+
ft2_eff_batch_size=128 # effective batch size
67+
ft2_ipe=1
68+
ft2_lr=0.01
69+
ft2_nnet_num_epochs=35
70+
ft2_margin_warmup=3
71+
ft2_reg_weight_embed=0.1
72+
ft2_min_chunk=10
73+
ft2_max_chunk=60
74+
75+
ft2_opt_opt="--opt-optimizer sgd --opt-lr $ft2_lr --opt-momentum 0.9 --opt-weight-decay 1e-5 --use-amp --var-batch-size"
76+
ft2_lrs_opt="--lrsch-lrsch-type cos_lr --lrsch-t 2500 --lrsch-t-mul 2 --lrsch-warm-restarts --lrsch-gamma 0.75 --lrsch-min-lr 1e-4 --lrsch-warmup-steps 100 --lrsch-update-lr-on-opt-step"
77+
ft2_nnet_name=${ft_nnet_name}.ft_eaffine_rege_w${ft2_reg_weight_embed}_${ft2_min_chunk}_${ft2_max_chunk}_sgdcos_lr${ft2_lr}_b${ft2_eff_batch_size}_amp.v2.$nnet_adapt_data
78+
ft2_nnet_dir=exp/xvector_nnets/$ft2_nnet_name
79+
ft2_nnet=$ft2_nnet_dir/model_ep0015.pth
80+
81+
82+
# xvector full nnet finetuning
83+
ft3_batch_size_1gpu=2
84+
ft3_eff_batch_size=128 # effective batch size
85+
ft3_ipe=1
86+
ft3_lr=0.01
87+
ft3_nnet_num_epochs=10
88+
ft3_margin_warmup=20
89+
ft3_reg_weight_embed=0.1
90+
ft3_reg_weight_enc=0.1
91+
ft3_min_chunk=10
92+
ft3_max_chunk=60
93+
94+
ft3_opt_opt="--opt-optimizer sgd --opt-lr $ft3_lr --opt-momentum 0.9 --opt-weight-decay 1e-5 --use-amp --var-batch-size"
95+
ft3_lrs_opt="--lrsch-lrsch-type cos_lr --lrsch-t 2500 --lrsch-t-mul 2 --lrsch-warm-restarts --lrsch-gamma 0.75 --lrsch-min-lr 1e-4 --lrsch-warmup-steps 100 --lrsch-update-lr-on-opt-step"
96+
ft3_nnet_name=${ft2_nnet_name}.ft_reg_wenc${ft3_reg_weight_enc}_we${ft3_reg_weight_embed}_${ft3_min_chunk}_${ft3_max_chunk}_sgdcos_lr${ft3_lr}_b${ft3_eff_batch_size}_amp.v2
97+
ft3_nnet_name=${ft2_nnet_name}.ft_${ft3_min_chunk}_${ft3_max_chunk}_sgdcos_lr${ft3_lr}_b${ft3_eff_batch_size}_amp.v2
98+
ft3_nnet_dir=exp/xvector_nnets/$ft3_nnet_name
99+
ft3_nnet=$ft3_nnet_dir/model_ep0010.pth
100+
101+
102+
# back-end
103+
plda_aug_config=conf/noise_aug.yml
104+
plda_num_augs=0
105+
# if [ $plda_num_augs -eq 0 ]; then
106+
# plda_data=sre_tel
107+
# plda_adapt_data=sre18_cmn2_adapt_lab
108+
# else
109+
# plda_data=sre_tel_augx${plda_num_augs}
110+
# plda_adapt_data=sre18_cmn2_adapt_lab_augx${plda_num_augs}
111+
# fi
112+
# plda_type=splda
113+
# lda_dim=200
114+
# plda_y_dim=150
115+
# plda_z_dim=200
116+
Lines changed: 112 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,112 @@
1+
# ResNet34 x-vector with mixed precision training
2+
3+
# acoustic features
4+
feat_config=conf/fbank64_mvn_8k.pyconf
5+
feat_type=fbank64_stmn
6+
7+
8+
# x-vector training
9+
nnet_data=alllangs_nocv_nocnceleb
10+
nnet_num_augs=4
11+
aug_opt="--train-aug-cfg conf/reverb_noise_aug.yml --val-aug-cfg conf/reverb_noise_aug.yml"
12+
13+
batch_size_1gpu=32
14+
eff_batch_size=512 # effective batch size
15+
ipe=$nnet_num_augs
16+
min_chunk=4
17+
max_chunk=4
18+
lr=0.01
19+
20+
nnet_type=resnet34
21+
dropout=0
22+
embed_dim=256
23+
24+
s=30
25+
margin_warmup=20
26+
margin=0.3
27+
28+
nnet_opt="--resnet-type $nnet_type --in-feats 64 --in-channels 1 --in-kernel-size 3 --in-stride 1 --no-maxpool --norm-layer instance-norm-affine --head-norm-layer layer-norm --pool-type scaled-dot-prod-att-v1 --pool-num-heads 64 --pool-d-k 128 --pool-d-v 128 --pool-bin-attn"
29+
30+
opt_opt="--opt-optimizer adam --opt-lr $lr --opt-beta1 0.9 --opt-beta2 0.95 --opt-weight-decay 1e-5 --opt-amsgrad" # --use-amp"
31+
lrs_opt="--lrsch-lrsch-type exp_lr --lrsch-decay-rate 0.5 --lrsch-decay-steps 10000 --lrsch-hold-steps 40000 --lrsch-min-lr 1e-5 --lrsch-warmup-steps 1000 --lrsch-update-lr-on-opt-step"
32+
33+
nnet_name=${feat_type}_${nnet_type}_eina_hln_bmhah64d8192_e${embed_dim}_arcs${s}m${margin}_do${dropout}_adam_lr${lr}_b${eff_batch_size}_amp.v1.$nnet_data
34+
nnet_num_epochs=50
35+
nnet_dir=exp/xvector_nnets/$nnet_name
36+
nnet=$nnet_dir/model_ep0050.pth
37+
38+
39+
# xvector full net finetuning with out-of-domain
40+
ft_batch_size_1gpu=4
41+
ft_eff_batch_size=128 # effective batch size
42+
ft_min_chunk=10
43+
ft_max_chunk=60
44+
ft_ipe=1
45+
ft_lr=0.05
46+
ft_nnet_num_epochs=21
47+
ft_margin=0.3
48+
ft_margin_warmup=3
49+
50+
ft_opt_opt="--opt-optimizer sgd --opt-lr $ft_lr --opt-momentum 0.9 --opt-weight-decay 1e-5 --use-amp --var-batch-size"
51+
ft_lrs_opt="--lrsch-lrsch-type cos_lr --lrsch-t 2500 --lrsch-t-mul 2 --lrsch-warm-restarts --lrsch-gamma 0.75 --lrsch-min-lr 1e-4 --lrsch-warmup-steps 100 --lrsch-update-lr-on-opt-step"
52+
ft_nnet_name=${nnet_name}.ft_${ft_min_chunk}_${ft_max_chunk}_arcm${ft_margin}_sgdcos_lr${ft_lr}_b${ft_eff_batch_size}_amp.v2
53+
ft_nnet_dir=exp/xvector_nnets/$ft_nnet_name
54+
ft_nnet=$ft_nnet_dir/model_ep0021.pth
55+
56+
57+
# xvector last-layer finetuning realtel
58+
reg_layers_classif=0
59+
reg_layers_enc="0 1 2 3 4"
60+
nnet_adapt_data=realtel
61+
ft2_batch_size_1gpu=16
62+
ft2_eff_batch_size=128 # effective batch size
63+
ft2_ipe=1
64+
ft2_lr=0.01
65+
ft2_nnet_num_epochs=35
66+
ft2_margin_warmup=3
67+
ft2_reg_weight_embed=0.1
68+
ft2_min_chunk=10
69+
ft2_max_chunk=60
70+
71+
ft2_opt_opt="--opt-optimizer sgd --opt-lr $ft2_lr --opt-momentum 0.9 --opt-weight-decay 1e-5 --use-amp --var-batch-size"
72+
ft2_lrs_opt="--lrsch-lrsch-type cos_lr --lrsch-t 2500 --lrsch-t-mul 2 --lrsch-warm-restarts --lrsch-gamma 0.75 --lrsch-min-lr 1e-4 --lrsch-warmup-steps 100 --lrsch-update-lr-on-opt-step"
73+
ft2_nnet_name=${ft_nnet_name}.ft_eaffine_rege_w${ft2_reg_weight_embed}_${ft2_min_chunk}_${ft2_max_chunk}_sgdcos_lr${ft2_lr}_b${ft2_eff_batch_size}_amp.v2.$nnet_adapt_data
74+
ft2_nnet_dir=exp/xvector_nnets/$ft2_nnet_name
75+
ft2_nnet=$ft2_nnet_dir/model_ep0015.pth
76+
77+
78+
# xvector full nnet finetuning
79+
ft3_batch_size_1gpu=2
80+
ft3_eff_batch_size=128 # effective batch size
81+
ft3_ipe=1
82+
ft3_lr=0.01
83+
ft3_nnet_num_epochs=10
84+
ft3_margin_warmup=20
85+
ft3_reg_weight_embed=0.1
86+
ft3_reg_weight_enc=0.1
87+
ft3_min_chunk=10
88+
ft3_max_chunk=60
89+
90+
ft3_opt_opt="--opt-optimizer sgd --opt-lr $ft3_lr --opt-momentum 0.9 --opt-weight-decay 1e-5 --use-amp --var-batch-size"
91+
ft3_lrs_opt="--lrsch-lrsch-type cos_lr --lrsch-t 2500 --lrsch-t-mul 2 --lrsch-warm-restarts --lrsch-gamma 0.75 --lrsch-min-lr 1e-4 --lrsch-warmup-steps 100 --lrsch-update-lr-on-opt-step"
92+
ft3_nnet_name=${ft2_nnet_name}.ft_reg_wenc${ft3_reg_weight_enc}_we${ft3_reg_weight_embed}_${ft3_min_chunk}_${ft3_max_chunk}_sgdcos_lr${ft3_lr}_b${ft3_eff_batch_size}_amp.v2
93+
ft3_nnet_name=${ft2_nnet_name}.ft_${ft3_min_chunk}_${ft3_max_chunk}_sgdcos_lr${ft3_lr}_b${ft3_eff_batch_size}_amp.v2
94+
ft3_nnet_dir=exp/xvector_nnets/$ft3_nnet_name
95+
ft3_nnet=$ft3_nnet_dir/model_ep0010.pth
96+
97+
98+
# back-end
99+
plda_aug_config=conf/noise_aug.yml
100+
plda_num_augs=0
101+
# if [ $plda_num_augs -eq 0 ]; then
102+
# plda_data=sre_tel
103+
# plda_adapt_data=sre18_cmn2_adapt_lab
104+
# else
105+
# plda_data=sre_tel_augx${plda_num_augs}
106+
# plda_adapt_data=sre18_cmn2_adapt_lab_augx${plda_num_augs}
107+
# fi
108+
# plda_type=splda
109+
# lda_dim=200
110+
# plda_y_dim=150
111+
# plda_z_dim=200
112+
Lines changed: 114 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,114 @@
1+
# ResNet34 x-vector with mixed precision training
2+
3+
# acoustic features
4+
feat_config=conf/fbank64_mvn_8k.pyconf
5+
feat_type=fbank64_stmn
6+
7+
8+
# x-vector training
9+
nnet_data=alllangs_nocv_nocnceleb
10+
nnet_num_augs=4
11+
aug_opt="--train-aug-cfg conf/reverb_noise_aug.yml --val-aug-cfg conf/reverb_noise_aug.yml"
12+
13+
batch_size_1gpu=32
14+
eff_batch_size=512 # effective batch size
15+
ipe=$nnet_num_augs
16+
min_chunk=4
17+
max_chunk=4
18+
lr=0.01
19+
20+
nnet_type=resnet34
21+
dropout=0
22+
embed_dim=256
23+
24+
s=30
25+
margin_warmup=20
26+
margin=0.3
27+
28+
attstats_inner=128
29+
30+
nnet_opt="--resnet-type $nnet_type --in-feats 64 --in-channels 1 --in-kernel-size 3 --in-stride 1 --no-maxpool --norm-layer instance-norm-affine --head-norm-layer layer-norm --pool-type ch-wise-att-mean-stddev --pool-inner-feats $attstats_inner"
31+
32+
opt_opt="--opt-optimizer adam --opt-lr $lr --opt-beta1 0.9 --opt-beta2 0.95 --opt-weight-decay 1e-5 --opt-amsgrad" # --use-amp"
33+
lrs_opt="--lrsch-lrsch-type exp_lr --lrsch-decay-rate 0.5 --lrsch-decay-steps 10000 --lrsch-hold-steps 40000 --lrsch-min-lr 1e-5 --lrsch-warmup-steps 1000 --lrsch-update-lr-on-opt-step"
34+
35+
nnet_name=${feat_type}_${nnet_type}_eina_hln_chattstatsi128_e${embed_dim}_arcs${s}m${margin}_do${dropout}_adam_lr${lr}_b${eff_batch_size}_amp.v1.$nnet_data
36+
nnet_num_epochs=50
37+
nnet_dir=exp/xvector_nnets/$nnet_name
38+
nnet=$nnet_dir/model_ep0050.pth
39+
40+
41+
# xvector full net finetuning with out-of-domain
42+
ft_batch_size_1gpu=4
43+
ft_eff_batch_size=128 # effective batch size
44+
ft_min_chunk=10
45+
ft_max_chunk=60
46+
ft_ipe=1
47+
ft_lr=0.05
48+
ft_nnet_num_epochs=21
49+
ft_margin=0.3
50+
ft_margin_warmup=3
51+
52+
ft_opt_opt="--opt-optimizer sgd --opt-lr $ft_lr --opt-momentum 0.9 --opt-weight-decay 1e-5 --use-amp --var-batch-size"
53+
ft_lrs_opt="--lrsch-lrsch-type cos_lr --lrsch-t 2500 --lrsch-t-mul 2 --lrsch-warm-restarts --lrsch-gamma 0.75 --lrsch-min-lr 1e-4 --lrsch-warmup-steps 100 --lrsch-update-lr-on-opt-step"
54+
ft_nnet_name=${nnet_name}.ft_${ft_min_chunk}_${ft_max_chunk}_arcm${ft_margin}_sgdcos_lr${ft_lr}_b${ft_eff_batch_size}_amp.v2
55+
ft_nnet_dir=exp/xvector_nnets/$ft_nnet_name
56+
ft_nnet=$ft_nnet_dir/model_ep0021.pth
57+
58+
59+
# xvector last-layer finetuning realtel
60+
reg_layers_classif=0
61+
reg_layers_enc="0 1 2 3 4"
62+
nnet_adapt_data=realtel
63+
ft2_batch_size_1gpu=16
64+
ft2_eff_batch_size=128 # effective batch size
65+
ft2_ipe=1
66+
ft2_lr=0.01
67+
ft2_nnet_num_epochs=35
68+
ft2_margin_warmup=3
69+
ft2_reg_weight_embed=0.1
70+
ft2_min_chunk=10
71+
ft2_max_chunk=60
72+
73+
ft2_opt_opt="--opt-optimizer sgd --opt-lr $ft2_lr --opt-momentum 0.9 --opt-weight-decay 1e-5 --use-amp --var-batch-size"
74+
ft2_lrs_opt="--lrsch-lrsch-type cos_lr --lrsch-t 2500 --lrsch-t-mul 2 --lrsch-warm-restarts --lrsch-gamma 0.75 --lrsch-min-lr 1e-4 --lrsch-warmup-steps 100 --lrsch-update-lr-on-opt-step"
75+
ft2_nnet_name=${ft_nnet_name}.ft_eaffine_rege_w${ft2_reg_weight_embed}_${ft2_min_chunk}_${ft2_max_chunk}_sgdcos_lr${ft2_lr}_b${ft2_eff_batch_size}_amp.v2.$nnet_adapt_data
76+
ft2_nnet_dir=exp/xvector_nnets/$ft2_nnet_name
77+
ft2_nnet=$ft2_nnet_dir/model_ep0015.pth
78+
79+
80+
# xvector full nnet finetuning
81+
ft3_batch_size_1gpu=2
82+
ft3_eff_batch_size=128 # effective batch size
83+
ft3_ipe=1
84+
ft3_lr=0.01
85+
ft3_nnet_num_epochs=10
86+
ft3_margin_warmup=20
87+
ft3_reg_weight_embed=0.1
88+
ft3_reg_weight_enc=0.1
89+
ft3_min_chunk=10
90+
ft3_max_chunk=60
91+
92+
ft3_opt_opt="--opt-optimizer sgd --opt-lr $ft3_lr --opt-momentum 0.9 --opt-weight-decay 1e-5 --use-amp --var-batch-size"
93+
ft3_lrs_opt="--lrsch-lrsch-type cos_lr --lrsch-t 2500 --lrsch-t-mul 2 --lrsch-warm-restarts --lrsch-gamma 0.75 --lrsch-min-lr 1e-4 --lrsch-warmup-steps 100 --lrsch-update-lr-on-opt-step"
94+
ft3_nnet_name=${ft2_nnet_name}.ft_reg_wenc${ft3_reg_weight_enc}_we${ft3_reg_weight_embed}_${ft3_min_chunk}_${ft3_max_chunk}_sgdcos_lr${ft3_lr}_b${ft3_eff_batch_size}_amp.v2
95+
ft3_nnet_name=${ft2_nnet_name}.ft_${ft3_min_chunk}_${ft3_max_chunk}_sgdcos_lr${ft3_lr}_b${ft3_eff_batch_size}_amp.v2
96+
ft3_nnet_dir=exp/xvector_nnets/$ft3_nnet_name
97+
ft3_nnet=$ft3_nnet_dir/model_ep0010.pth
98+
99+
100+
# back-end
101+
plda_aug_config=conf/noise_aug.yml
102+
plda_num_augs=0
103+
# if [ $plda_num_augs -eq 0 ]; then
104+
# plda_data=sre_tel
105+
# plda_adapt_data=sre18_cmn2_adapt_lab
106+
# else
107+
# plda_data=sre_tel_augx${plda_num_augs}
108+
# plda_adapt_data=sre18_cmn2_adapt_lab_augx${plda_num_augs}
109+
# fi
110+
# plda_type=splda
111+
# lda_dim=200
112+
# plda_y_dim=150
113+
# plda_z_dim=200
114+

0 commit comments

Comments
 (0)