rmysmo committed on
Commit
f776fbb
·
1 Parent(s): 8e7c835

added translate

Browse files
Files changed (1) hide show
  1. app.py +240 -262
app.py CHANGED
@@ -1205,10 +1205,10 @@ def export_onnx(ModelPath, ExportedPath, MoeVS=True):
1205
  cpt = torch.load(ModelPath, map_location="cpu")
1206
  cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk
1207
  hidden_channels = 256 if cpt.get("version",
1208
- "v1") == "v1" else 768 # cpt["config"][-2] # hidden_channels,为768Vec做准备
1209
 
1210
  test_phone = torch.rand(1, 200, hidden_channels) # hidden unit
1211
- test_phone_lengths = torch.tensor([200]).long() # hidden unit 长度(貌似没啥用)
1212
  test_pitch = torch.randint(size=(1, 200), low=5, high=255) # 基频(单位赫兹)
1213
  test_pitchf = torch.rand(1, 200) # nsf基频
1214
  test_ds = torch.LongTensor([0]) # 说话人ID
@@ -1512,22 +1512,6 @@ with gr.Blocks(theme=gr.themes.Base(), title='Voice DeepFake 💻') as app:
1512
  with gr.Tabs():
1513
  with gr.TabItem("Interfeys"):
1514
  gr.HTML("<center><h1> Voice DeepFake </h1></span>")
1515
- # gr.HTML("<center><h3> Если вы хотите использовать это пространство в частном порядке, я рекомендую продублировать его. </h3></span>")
1516
- # with gr.Row():
1517
- # gr.Markdown(
1518
- # """
1519
- # Если не работает, то пробуйте тут https://huggingface.co/spaces/Clebersla/RVC_V2_Huggingface_Version
1520
- # """
1521
- # )
1522
-
1523
- # Inference Preset Row
1524
- # with gr.Row():
1525
- # mangio_preset = gr.Dropdown(label="Inference Preset", choices=sorted(get_presets()))
1526
- # mangio_preset_name_save = gr.Textbox(
1527
- # label="Your preset name"
1528
- # )
1529
- # mangio_preset_save_btn = gr.Button('Save Preset', variant="primary")
1530
-
1531
  # Other RVC stuff
1532
  with gr.Row():
1533
  sid0 = gr.Dropdown(label="1. Modelni tanlang.", choices=sorted(names), value=check_for_name())
@@ -1881,12 +1865,6 @@ with gr.Blocks(theme=gr.themes.Base(), title='Voice DeepFake 💻') as app:
1881
  )
1882
  but1.click(fn=lambda: easy_uploader.clear())
1883
  with gr.TabItem("Modelni yuklash"):
1884
- # with gr.Row():
1885
- # gr.Markdown(
1886
- # """
1887
- # Если не работает, то пробуйте тут https://huggingface.co/spaces/Clebersla/RVC_V2_Huggingface_Version
1888
- # """
1889
- # )
1890
  with gr.Row():
1891
  url = gr.Textbox(label="Model URL manzilini kiriting:",
1892
  placeholder=".pth va .index fayllarni o'z ichiga olgan zipga havolani kiriting")
@@ -1908,244 +1886,244 @@ with gr.Blocks(theme=gr.themes.Base(), title='Voice DeepFake 💻') as app:
1908
  return num_files >= 2
1909
 
1910
 
1911
- if has_two_files_in_pretrained_folder():
1912
- print("Pretrained weights are downloaded. Training tab enabled!\n-------------------------------")
1913
- with gr.TabItem("Train", visible=False):
1914
- with gr.Row():
1915
- with gr.Column():
1916
- exp_dir1 = gr.Textbox(label="Voice Name:", value="My-Voice")
1917
- sr2 = gr.Radio(
1918
- label=i18n("目标采样率"),
1919
- choices=["40k", "48k"],
1920
- value="40k",
1921
- interactive=True,
1922
- visible=False
1923
- )
1924
- if_f0_3 = gr.Radio(
1925
- label=i18n("模型是否带音高指导(唱歌一定要, 语音可以不要)"),
1926
- choices=[True, False],
1927
- value=True,
1928
- interactive=True,
1929
- visible=False
1930
- )
1931
- version19 = gr.Radio(
1932
- label="RVC version",
1933
- choices=["v1", "v2"],
1934
- value="v2",
1935
- interactive=True,
1936
- visible=False,
1937
- )
1938
- np7 = gr.Slider(
1939
- minimum=0,
1940
- maximum=config.n_cpu,
1941
- step=1,
1942
- label="# of CPUs for data processing (Leave as it is)",
1943
- value=config.n_cpu,
1944
- interactive=True,
1945
- visible=True
1946
- )
1947
- trainset_dir4 = gr.Textbox(label="Path to your dataset (audios, not zip):", value="./dataset")
1948
- easy_uploader = gr.Files(
1949
- label='OR Drop your audios here. They will be uploaded in your dataset path above.',
1950
- file_types=['audio'])
1951
- but1 = gr.Button("1. Process The Dataset", variant="primary")
1952
- info1 = gr.Textbox(label="Status (wait until it says 'end preprocess'):", value="")
1953
- easy_uploader.upload(fn=upload_to_dataset, inputs=[easy_uploader, trainset_dir4],
1954
- outputs=[info1])
1955
- but1.click(
1956
- preprocess_dataset, [trainset_dir4, exp_dir1, sr2, np7], [info1]
1957
- )
1958
- with gr.Column():
1959
- spk_id5 = gr.Slider(
1960
- minimum=0,
1961
- maximum=4,
1962
- step=1,
1963
- label=i18n("请指定说话人id"),
1964
- value=0,
1965
- interactive=True,
1966
- visible=False
1967
- )
1968
- with gr.Accordion('GPU Settings', open=False, visible=False):
1969
- gpus6 = gr.Textbox(
1970
- label=i18n("以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2"),
1971
- value=gpus,
1972
- interactive=True,
1973
- visible=False
1974
- )
1975
- gpu_info9 = gr.Textbox(label=i18n("显卡信息"), value=gpu_info)
1976
- f0method8 = gr.Radio(
1977
- label=i18n(
1978
- "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢"
1979
- ),
1980
- choices=["harvest", "crepe", "mangio-crepe", "rmvpe"],
1981
- # Fork feature: Crepe on f0 extraction for training.
1982
- value="rmvpe",
1983
- interactive=True,
1984
- )
1985
-
1986
- extraction_crepe_hop_length = gr.Slider(
1987
- minimum=1,
1988
- maximum=512,
1989
- step=1,
1990
- label=i18n("crepe_hop_length"),
1991
- value=128,
1992
- interactive=True,
1993
- visible=False,
1994
- )
1995
- f0method8.change(fn=whethercrepeornah, inputs=[f0method8],
1996
- outputs=[extraction_crepe_hop_length])
1997
- but2 = gr.Button("2. Pitch Extraction", variant="primary")
1998
- info2 = gr.Textbox(label="Status(Check the Colab Notebook's cell output):", value="",
1999
- max_lines=8)
2000
- but2.click(
2001
- extract_f0_feature,
2002
- [gpus6, np7, f0method8, if_f0_3, exp_dir1, version19, extraction_crepe_hop_length],
2003
- [info2],
2004
- )
2005
- with gr.Row():
2006
- with gr.Column():
2007
- total_epoch11 = gr.Slider(
2008
- minimum=1,
2009
- maximum=5000,
2010
- step=10,
2011
- label="Total # of training epochs (IF you choose a value too high, your model will sound horribly overtrained.):",
2012
- value=250,
2013
- interactive=True,
2014
- )
2015
- butstop = gr.Button(
2016
- "Stop Training",
2017
- variant='primary',
2018
- visible=False,
2019
- )
2020
- but3 = gr.Button("3. Train Model", variant="primary", visible=True)
2021
-
2022
- but3.click(fn=stoptraining, inputs=[gr.Number(value=0, visible=False)],
2023
- outputs=[but3, butstop])
2024
- butstop.click(fn=stoptraining, inputs=[gr.Number(value=1, visible=False)],
2025
- outputs=[butstop, but3])
2026
-
2027
- but4 = gr.Button("4.Train Index", variant="primary")
2028
- info3 = gr.Textbox(label="Status(Check the Colab Notebook's cell output):", value="",
2029
- max_lines=10)
2030
- with gr.Accordion("Training Preferences (You can leave these as they are)", open=False):
2031
- # gr.Markdown(value=i18n("step3: 填写训练设置, 开始训练模型和索引"))
2032
- with gr.Column():
2033
- save_epoch10 = gr.Slider(
2034
- minimum=1,
2035
- maximum=200,
2036
- step=1,
2037
- label="Backup every X amount of epochs:",
2038
- value=10,
2039
- interactive=True,
2040
- )
2041
- batch_size12 = gr.Slider(
2042
- minimum=1,
2043
- maximum=40,
2044
- step=1,
2045
- label="Batch Size (LEAVE IT unless you know what you're doing!):",
2046
- value=default_batch_size,
2047
- interactive=True,
2048
- )
2049
- if_save_latest13 = gr.Checkbox(
2050
- label="Save only the latest '.ckpt' file to save disk space.",
2051
- value=True,
2052
- interactive=True,
2053
- )
2054
- if_cache_gpu17 = gr.Checkbox(
2055
- label="Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training, but caching large datasets will consume a lot of GPU memory and may not provide much speed improvement.",
2056
- value=False,
2057
- interactive=True,
2058
- )
2059
- if_save_every_weights18 = gr.Checkbox(
2060
- label="Save a small final model to the 'weights' folder at each save point.",
2061
- value=True,
2062
- interactive=True,
2063
- )
2064
- zip_model = gr.Button('5. Download Model')
2065
- zipped_model = gr.Files(label='Your Model and Index file can be downloaded here:')
2066
- zip_model.click(fn=zip_downloader, inputs=[exp_dir1], outputs=[zipped_model, info3])
2067
- with gr.Group():
2068
- with gr.Accordion("Base Model Locations:", open=False, visible=False):
2069
- pretrained_G14 = gr.Textbox(
2070
- label=i18n("加载预训练底模G路径"),
2071
- value="pretrained_v2/f0G40k.pth",
2072
- interactive=True,
2073
- )
2074
- pretrained_D15 = gr.Textbox(
2075
- label=i18n("加载预训练底模D路径"),
2076
- value="pretrained_v2/f0D40k.pth",
2077
- interactive=True,
2078
- )
2079
- gpus16 = gr.Textbox(
2080
- label=i18n("以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2"),
2081
- value=gpus,
2082
- interactive=True,
2083
- )
2084
- sr2.change(
2085
- change_sr2,
2086
- [sr2, if_f0_3, version19],
2087
- [pretrained_G14, pretrained_D15, version19],
2088
- )
2089
- version19.change(
2090
- change_version19,
2091
- [sr2, if_f0_3, version19],
2092
- [pretrained_G14, pretrained_D15],
2093
- )
2094
- if_f0_3.change(
2095
- change_f0,
2096
- [if_f0_3, sr2, version19],
2097
- [f0method8, pretrained_G14, pretrained_D15],
2098
- )
2099
- but5 = gr.Button(i18n("一键训练"), variant="primary", visible=False)
2100
- but3.click(
2101
- click_train,
2102
- [
2103
- exp_dir1,
2104
- sr2,
2105
- if_f0_3,
2106
- spk_id5,
2107
- save_epoch10,
2108
- total_epoch11,
2109
- batch_size12,
2110
- if_save_latest13,
2111
- pretrained_G14,
2112
- pretrained_D15,
2113
- gpus16,
2114
- if_cache_gpu17,
2115
- if_save_every_weights18,
2116
- version19,
2117
- ],
2118
- [
2119
- info3,
2120
- butstop,
2121
- but3,
2122
- ],
2123
- )
2124
- but4.click(train_index, [exp_dir1, version19], info3)
2125
- but5.click(
2126
- train1key,
2127
- [
2128
- exp_dir1,
2129
- sr2,
2130
- if_f0_3,
2131
- trainset_dir4,
2132
- spk_id5,
2133
- np7,
2134
- f0method8,
2135
- save_epoch10,
2136
- total_epoch11,
2137
- batch_size12,
2138
- if_save_latest13,
2139
- pretrained_G14,
2140
- pretrained_D15,
2141
- gpus16,
2142
- if_cache_gpu17,
2143
- if_save_every_weights18,
2144
- version19,
2145
- extraction_crepe_hop_length
2146
- ],
2147
- info3,
2148
- )
2149
 
2150
  app.queue(concurrency_count=511, max_size=1022).launch(share=False, quiet=True)
2151
  # endregion
 
1205
  cpt = torch.load(ModelPath, map_location="cpu")
1206
  cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk
1207
  hidden_channels = 256 if cpt.get("version",
1208
+ "v1") == "v1" else 768 # cpt["config"][-2] # hidden_channels
1209
 
1210
  test_phone = torch.rand(1, 200, hidden_channels) # hidden unit
1211
+ test_phone_lengths = torch.tensor([200]).long() # hidden unit
1212
  test_pitch = torch.randint(size=(1, 200), low=5, high=255) # 基频(单位赫兹)
1213
  test_pitchf = torch.rand(1, 200) # nsf基频
1214
  test_ds = torch.LongTensor([0]) # 说话人ID
 
1512
  with gr.Tabs():
1513
  with gr.TabItem("Interfeys"):
1514
  gr.HTML("<center><h1> Voice DeepFake </h1></span>")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1515
  # Other RVC stuff
1516
  with gr.Row():
1517
  sid0 = gr.Dropdown(label="1. Modelni tanlang.", choices=sorted(names), value=check_for_name())
 
1865
  )
1866
  but1.click(fn=lambda: easy_uploader.clear())
1867
  with gr.TabItem("Modelni yuklash"):
 
 
 
 
 
 
1868
  with gr.Row():
1869
  url = gr.Textbox(label="Model URL manzilini kiriting:",
1870
  placeholder=".pth va .index fayllarni o'z ichiga olgan zipga havolani kiriting")
 
1886
  return num_files >= 2
1887
 
1888
 
1889
+ # if has_two_files_in_pretrained_folder():
1890
+ # print("Pretrained weights are downloaded. Training tab enabled!\n-------------------------------")
1891
+ # with gr.TabItem("Train", visible=False):
1892
+ # with gr.Row():
1893
+ # with gr.Column():
1894
+ # exp_dir1 = gr.Textbox(label="Voice Name:", value="My-Voice")
1895
+ # sr2 = gr.Radio(
1896
+ # label=i18n("目标采样率"),
1897
+ # choices=["40k", "48k"],
1898
+ # value="40k",
1899
+ # interactive=True,
1900
+ # visible=False
1901
+ # )
1902
+ # if_f0_3 = gr.Radio(
1903
+ # label=i18n("模型是否带音高指导(唱歌一定要, 语音可以不要)"),
1904
+ # choices=[True, False],
1905
+ # value=True,
1906
+ # interactive=True,
1907
+ # visible=False
1908
+ # )
1909
+ # version19 = gr.Radio(
1910
+ # label="RVC version",
1911
+ # choices=["v1", "v2"],
1912
+ # value="v2",
1913
+ # interactive=True,
1914
+ # visible=False,
1915
+ # )
1916
+ # np7 = gr.Slider(
1917
+ # minimum=0,
1918
+ # maximum=config.n_cpu,
1919
+ # step=1,
1920
+ # label="# of CPUs for data processing (Leave as it is)",
1921
+ # value=config.n_cpu,
1922
+ # interactive=True,
1923
+ # visible=True
1924
+ # )
1925
+ # trainset_dir4 = gr.Textbox(label="Path to your dataset (audios, not zip):", value="./dataset")
1926
+ # easy_uploader = gr.Files(
1927
+ # label='OR Drop your audios here. They will be uploaded in your dataset path above.',
1928
+ # file_types=['audio'])
1929
+ # but1 = gr.Button("1. Process The Dataset", variant="primary")
1930
+ # info1 = gr.Textbox(label="Status (wait until it says 'end preprocess'):", value="")
1931
+ # easy_uploader.upload(fn=upload_to_dataset, inputs=[easy_uploader, trainset_dir4],
1932
+ # outputs=[info1])
1933
+ # but1.click(
1934
+ # preprocess_dataset, [trainset_dir4, exp_dir1, sr2, np7], [info1]
1935
+ # )
1936
+ # with gr.Column():
1937
+ # spk_id5 = gr.Slider(
1938
+ # minimum=0,
1939
+ # maximum=4,
1940
+ # step=1,
1941
+ # label=i18n("请指定说话人id"),
1942
+ # value=0,
1943
+ # interactive=True,
1944
+ # visible=False
1945
+ # )
1946
+ # with gr.Accordion('GPU Settings', open=False, visible=False):
1947
+ # gpus6 = gr.Textbox(
1948
+ # label=i18n("以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2"),
1949
+ # value=gpus,
1950
+ # interactive=True,
1951
+ # visible=False
1952
+ # )
1953
+ # gpu_info9 = gr.Textbox(label=i18n("显卡信息"), value=gpu_info)
1954
+ # f0method8 = gr.Radio(
1955
+ # label=i18n(
1956
+ # "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢"
1957
+ # ),
1958
+ # choices=["harvest", "crepe", "mangio-crepe", "rmvpe"],
1959
+ # # Fork feature: Crepe on f0 extraction for training.
1960
+ # value="rmvpe",
1961
+ # interactive=True,
1962
+ # )
1963
+ #
1964
+ # extraction_crepe_hop_length = gr.Slider(
1965
+ # minimum=1,
1966
+ # maximum=512,
1967
+ # step=1,
1968
+ # label=i18n("crepe_hop_length"),
1969
+ # value=128,
1970
+ # interactive=True,
1971
+ # visible=False,
1972
+ # )
1973
+ # f0method8.change(fn=whethercrepeornah, inputs=[f0method8],
1974
+ # outputs=[extraction_crepe_hop_length])
1975
+ # but2 = gr.Button("2. Pitch Extraction", variant="primary")
1976
+ # info2 = gr.Textbox(label="Status(Check the Colab Notebook's cell output):", value="",
1977
+ # max_lines=8)
1978
+ # but2.click(
1979
+ # extract_f0_feature,
1980
+ # [gpus6, np7, f0method8, if_f0_3, exp_dir1, version19, extraction_crepe_hop_length],
1981
+ # [info2],
1982
+ # )
1983
+ # with gr.Row():
1984
+ # with gr.Column():
1985
+ # total_epoch11 = gr.Slider(
1986
+ # minimum=1,
1987
+ # maximum=5000,
1988
+ # step=10,
1989
+ # label="Total # of training epochs (IF you choose a value too high, your model will sound horribly overtrained.):",
1990
+ # value=250,
1991
+ # interactive=True,
1992
+ # )
1993
+ # butstop = gr.Button(
1994
+ # "Stop Training",
1995
+ # variant='primary',
1996
+ # visible=False,
1997
+ # )
1998
+ # but3 = gr.Button("3. Train Model", variant="primary", visible=True)
1999
+ #
2000
+ # but3.click(fn=stoptraining, inputs=[gr.Number(value=0, visible=False)],
2001
+ # outputs=[but3, butstop])
2002
+ # butstop.click(fn=stoptraining, inputs=[gr.Number(value=1, visible=False)],
2003
+ # outputs=[butstop, but3])
2004
+ #
2005
+ # but4 = gr.Button("4.Train Index", variant="primary")
2006
+ # info3 = gr.Textbox(label="Status(Check the Colab Notebook's cell output):", value="",
2007
+ # max_lines=10)
2008
+ # with gr.Accordion("Training Preferences (You can leave these as they are)", open=False):
2009
+ # # gr.Markdown(value=i18n("step3: 填写训练设置, 开始训练模型和索引"))
2010
+ # with gr.Column():
2011
+ # save_epoch10 = gr.Slider(
2012
+ # minimum=1,
2013
+ # maximum=200,
2014
+ # step=1,
2015
+ # label="Backup every X amount of epochs:",
2016
+ # value=10,
2017
+ # interactive=True,
2018
+ # )
2019
+ # batch_size12 = gr.Slider(
2020
+ # minimum=1,
2021
+ # maximum=40,
2022
+ # step=1,
2023
+ # label="Batch Size (LEAVE IT unless you know what you're doing!):",
2024
+ # value=default_batch_size,
2025
+ # interactive=True,
2026
+ # )
2027
+ # if_save_latest13 = gr.Checkbox(
2028
+ # label="Save only the latest '.ckpt' file to save disk space.",
2029
+ # value=True,
2030
+ # interactive=True,
2031
+ # )
2032
+ # if_cache_gpu17 = gr.Checkbox(
2033
+ # label="Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training, but caching large datasets will consume a lot of GPU memory and may not provide much speed improvement.",
2034
+ # value=False,
2035
+ # interactive=True,
2036
+ # )
2037
+ # if_save_every_weights18 = gr.Checkbox(
2038
+ # label="Save a small final model to the 'weights' folder at each save point.",
2039
+ # value=True,
2040
+ # interactive=True,
2041
+ # )
2042
+ # zip_model = gr.Button('5. Download Model')
2043
+ # zipped_model = gr.Files(label='Your Model and Index file can be downloaded here:')
2044
+ # zip_model.click(fn=zip_downloader, inputs=[exp_dir1], outputs=[zipped_model, info3])
2045
+ # with gr.Group():
2046
+ # with gr.Accordion("Base Model Locations:", open=False, visible=False):
2047
+ # pretrained_G14 = gr.Textbox(
2048
+ # label=i18n("加载预训练底模G路径"),
2049
+ # value="pretrained_v2/f0G40k.pth",
2050
+ # interactive=True,
2051
+ # )
2052
+ # pretrained_D15 = gr.Textbox(
2053
+ # label=i18n("加载预训练底模D路径"),
2054
+ # value="pretrained_v2/f0D40k.pth",
2055
+ # interactive=True,
2056
+ # )
2057
+ # gpus16 = gr.Textbox(
2058
+ # label=i18n("以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2"),
2059
+ # value=gpus,
2060
+ # interactive=True,
2061
+ # )
2062
+ # sr2.change(
2063
+ # change_sr2,
2064
+ # [sr2, if_f0_3, version19],
2065
+ # [pretrained_G14, pretrained_D15, version19],
2066
+ # )
2067
+ # version19.change(
2068
+ # change_version19,
2069
+ # [sr2, if_f0_3, version19],
2070
+ # [pretrained_G14, pretrained_D15],
2071
+ # )
2072
+ # if_f0_3.change(
2073
+ # change_f0,
2074
+ # [if_f0_3, sr2, version19],
2075
+ # [f0method8, pretrained_G14, pretrained_D15],
2076
+ # )
2077
+ # but5 = gr.Button(i18n("一键训练"), variant="primary", visible=False)
2078
+ # but3.click(
2079
+ # click_train,
2080
+ # [
2081
+ # exp_dir1,
2082
+ # sr2,
2083
+ # if_f0_3,
2084
+ # spk_id5,
2085
+ # save_epoch10,
2086
+ # total_epoch11,
2087
+ # batch_size12,
2088
+ # if_save_latest13,
2089
+ # pretrained_G14,
2090
+ # pretrained_D15,
2091
+ # gpus16,
2092
+ # if_cache_gpu17,
2093
+ # if_save_every_weights18,
2094
+ # version19,
2095
+ # ],
2096
+ # [
2097
+ # info3,
2098
+ # butstop,
2099
+ # but3,
2100
+ # ],
2101
+ # )
2102
+ # but4.click(train_index, [exp_dir1, version19], info3)
2103
+ # but5.click(
2104
+ # train1key,
2105
+ # [
2106
+ # exp_dir1,
2107
+ # sr2,
2108
+ # if_f0_3,
2109
+ # trainset_dir4,
2110
+ # spk_id5,
2111
+ # np7,
2112
+ # f0method8,
2113
+ # save_epoch10,
2114
+ # total_epoch11,
2115
+ # batch_size12,
2116
+ # if_save_latest13,
2117
+ # pretrained_G14,
2118
+ # pretrained_D15,
2119
+ # gpus16,
2120
+ # if_cache_gpu17,
2121
+ # if_save_every_weights18,
2122
+ # version19,
2123
+ # extraction_crepe_hop_length
2124
+ # ],
2125
+ # info3,
2126
+ # )
2127
 
2128
  app.queue(concurrency_count=511, max_size=1022).launch(share=False, quiet=True)
2129
  # endregion