diff --git a/examples/aishell3/tts3/run.sh b/examples/aishell3/tts3/run.sh
index b5da076b2d0..8dcecaa0370 100755
--- a/examples/aishell3/tts3/run.sh
+++ b/examples/aishell3/tts3/run.sh
@@ -43,10 +43,7 @@ fi

 if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
     # install paddle2onnx
-    version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
-    if [[ -z "$version" || ${version} != '1.0.0' ]]; then
-        pip install paddle2onnx==1.0.0
-    fi
+    pip install paddle2onnx --upgrade
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx fastspeech2_aishell3
     # considering the balance between speed and quality, we recommend that you use hifigan as vocoder
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx pwgan_aishell3
diff --git a/examples/canton/tts3/run.sh b/examples/canton/tts3/run.sh
index 3a3dfe0a587..acfc502239b 100755
--- a/examples/canton/tts3/run.sh
+++ b/examples/canton/tts3/run.sh
@@ -46,10 +46,7 @@ fi
 # we have only tested the following models so far
 if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
     # install paddle2onnx
-    version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
-    if [[ -z "$version" || ${version} != '1.0.0' ]]; then
-        pip install paddle2onnx==1.0.0
-    fi
+    pip install paddle2onnx --upgrade
     ../../csmsc/tts3/local/paddle2onnx.sh ${train_output_path} inference inference_onnx fastspeech2_canton
     # considering the balance between speed and quality, we recommend that you use hifigan as vocoder
     # ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx pwgan_csmsc
diff --git a/examples/csmsc/tts2/run.sh b/examples/csmsc/tts2/run.sh
index 6279ec5799c..5732ea3c774 100755
--- a/examples/csmsc/tts2/run.sh
+++ b/examples/csmsc/tts2/run.sh
@@ -45,10 +45,7 @@ fi
 # we have only tested the following models so far
 if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
     # install paddle2onnx
-    version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
-    if [[ -z "$version" || ${version} != '1.0.0' ]]; then
-        pip install paddle2onnx==1.0.0
-    fi
+    pip install paddle2onnx --upgrade
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx speedyspeech_csmsc
     # considering the balance between speed and quality, we recommend that you use hifigan as vocoder
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx pwgan_csmsc
diff --git a/examples/csmsc/tts3/run.sh b/examples/csmsc/tts3/run.sh
index dd8c9f3e685..a7b4e4239ba 100755
--- a/examples/csmsc/tts3/run.sh
+++ b/examples/csmsc/tts3/run.sh
@@ -45,10 +45,7 @@ fi
 # we have only tested the following models so far
 if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
     # install paddle2onnx
-    version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
-    if [[ -z "$version" || ${version} != '1.0.0' ]]; then
-        pip install paddle2onnx==1.0.0
-    fi
+    pip install paddle2onnx --upgrade
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx fastspeech2_csmsc
     # considering the balance between speed and quality, we recommend that you use hifigan as vocoder
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx pwgan_csmsc
diff --git a/examples/csmsc/tts3/run_cnndecoder.sh b/examples/csmsc/tts3/run_cnndecoder.sh
index 96b446c529d..f356f31335c 100755
--- a/examples/csmsc/tts3/run_cnndecoder.sh
+++ b/examples/csmsc/tts3/run_cnndecoder.sh
@@ -58,10 +58,7 @@ fi
 # paddle2onnx non streaming
 if [ ${stage} -le 7 ] && [ ${stop_stage} -ge 7 ]; then
     # install paddle2onnx
-    version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
-    if [[ -z "$version" || ${version} != '1.0.0' ]]; then
-        pip install paddle2onnx==1.0.0
-    fi
+    pip install paddle2onnx --upgrade
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx fastspeech2_csmsc
     # considering the balance between speed and quality, we recommend that you use hifigan as vocoder
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx pwgan_csmsc
@@ -77,10 +74,7 @@ fi
 # paddle2onnx streaming
 if [ ${stage} -le 9 ] && [ ${stop_stage} -ge 9 ]; then
     # install paddle2onnx
-    version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
-    if [[ -z "$version" || ${version} != '1.0.0' ]]; then
-        pip install paddle2onnx==1.0.0
-    fi
+    pip install paddle2onnx --upgrade
     # streaming acoustic model
     ./local/paddle2onnx.sh ${train_output_path} inference_streaming inference_onnx_streaming fastspeech2_csmsc_am_encoder_infer
     ./local/paddle2onnx.sh ${train_output_path} inference_streaming inference_onnx_streaming fastspeech2_csmsc_am_decoder
diff --git a/examples/csmsc/vits/run.sh b/examples/csmsc/vits/run.sh
index f2c5d452f94..03c59702b7d 100755
--- a/examples/csmsc/vits/run.sh
+++ b/examples/csmsc/vits/run.sh
@@ -45,10 +45,7 @@ fi
 # # we have only tested the following models so far
 # if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
 #     # install paddle2onnx
-#     version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
-#     if [[ -z "$version" || ${version} != '1.0.0' ]]; then
-#         pip install paddle2onnx==1.0.0
-#     fi
+#     pip install paddle2onnx --upgrade
 #     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx vits_csmsc
 # fi

diff --git a/examples/ljspeech/tts3/run.sh b/examples/ljspeech/tts3/run.sh
index aacd4cc03a8..0d8da920cbe 100755
--- a/examples/ljspeech/tts3/run.sh
+++ b/examples/ljspeech/tts3/run.sh
@@ -45,10 +45,7 @@ fi
 # we have only tested the following models so far
 if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
     # install paddle2onnx
-    version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
-    if [[ -z "$version" || ${version} != '1.0.0' ]]; then
-        pip install paddle2onnx==1.0.0
-    fi
+    pip install paddle2onnx --upgrade
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx fastspeech2_ljspeech
     # considering the balance between speed and quality, we recommend that you use hifigan as vocoder
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx pwgan_ljspeech
diff --git a/examples/vctk/tts3/run.sh b/examples/vctk/tts3/run.sh
index a112b94b7bd..76307bd5f32 100755
--- a/examples/vctk/tts3/run.sh
+++ b/examples/vctk/tts3/run.sh
@@ -43,10 +43,7 @@ fi

 if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
     # install paddle2onnx
-    version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
-    if [[ -z "$version" || ${version} != '1.0.0' ]]; then
-        pip install paddle2onnx==1.0.0
-    fi
+    pip install paddle2onnx --upgrade
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx fastspeech2_vctk
     # considering the balance between speed and quality, we recommend that you use hifigan as vocoder
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx pwgan_vctk
diff --git a/examples/zh_en_tts/tts3/run.sh b/examples/zh_en_tts/tts3/run.sh
index 12f99081af8..a4d86480b34 100755
--- a/examples/zh_en_tts/tts3/run.sh
+++ b/examples/zh_en_tts/tts3/run.sh
@@ -46,10 +46,7 @@ fi

 if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
     # install paddle2onnx
-    version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
-    if [[ -z "$version" || ${version} != '1.0.0' ]]; then
-        pip install paddle2onnx==1.0.0
-    fi
+    pip install paddle2onnx --upgrade
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx fastspeech2_mix
     # considering the balance between speed and quality, we recommend that you use hifigan as vocoder
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx pwgan_aishell3