PhoenixStormJr committed on
Commit
0315fda
·
verified ·
1 Parent(s): 036e8ac

Upload config.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. config.py +134 -0
config.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import torch
3
+ from multiprocessing import cpu_count
4
+
5
+
6
def config_file_change_fp32():
    """Rewrite the bundled config files for full-precision (fp32) operation.

    Flips every "true" to "false" in the 32k/40k/48k JSON configs (the
    fp16 flag is presumably the intended target — note this is a blunt
    textual replace over the whole file; TODO confirm no other boolean
    is affected) and lowers the "3.7" constant in
    trainset_preprocess_pipeline_print.py to "3.0".
    """
    for name in ["32k.json", "40k.json", "48k.json"]:
        path = f"configs/{name}"
        with open(path, "r") as fh:
            patched = fh.read().replace("true", "false")
        with open(path, "w") as fh:
            fh.write(patched)

    pipeline = "trainset_preprocess_pipeline_print.py"
    with open(pipeline, "r") as fh:
        patched = fh.read().replace("3.7", "3.0")
    with open(pipeline, "w") as fh:
        fh.write(patched)
16
+
17
+
18
class Config:
    """Runtime configuration for the application.

    On construction this resolves the compute device (CUDA / MPS / CPU),
    decides whether half-precision inference is safe, parses command-line
    flags, and derives the chunking parameters (x_pad, x_query, x_center,
    x_max) used downstream by the inference pipeline.
    """

    def __init__(self):
        # Optimistic defaults; device_config() may downgrade them.
        self.device = "cuda:0"
        self.is_half = True
        self.n_cpu = 0
        self.gpu_name = None
        self.gpu_mem = None  # total GPU memory in GiB (rounded), or None
        (
            self.python_cmd,
            self.listen_port,
            self.iscolab,
            self.noparallel,
            self.noautoopen,
            self.paperspace,
            self.is_cli,
        ) = self.arg_parse()

        self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()

    @staticmethod
    def arg_parse() -> tuple:
        """Parse command-line flags.

        Returns:
            tuple: (python_cmd, listen_port, iscolab, noparallel,
            noautoopen, paperspace, is_cli) in that order.
        """
        parser = argparse.ArgumentParser()
        parser.add_argument("--port", type=int, default=7860, help="Listen port")
        parser.add_argument(
            "--pycmd", type=str, default="python", help="Python command"
        )
        parser.add_argument("--colab", action="store_true", help="Launch in colab")
        parser.add_argument(
            "--noparallel", action="store_true", help="Disable parallel processing"
        )
        parser.add_argument(
            "--noautoopen",
            action="store_true",
            help="Do not open in browser automatically",
        )
        parser.add_argument(  # Fork Feature. Paperspace integration for web UI
            "--paperspace", action="store_true", help="Note that this argument just shares a gradio link for the web UI. Thus can be used on other non-local CLI systems."
        )
        parser.add_argument(  # Fork Feature. Embed a CLI into the infer-web.py
            "--is_cli", action="store_true", help="Use the CLI instead of setting up a gradio UI. This flag will launch an RVC text interface where you can execute functions from infer-web.py!"
        )
        cmd_opts = parser.parse_args()

        # Fall back to the default port if the requested one is out of range.
        cmd_opts.port = cmd_opts.port if 0 <= cmd_opts.port <= 65535 else 7860

        return (
            cmd_opts.pycmd,
            cmd_opts.port,
            cmd_opts.colab,
            cmd_opts.noparallel,
            cmd_opts.noautoopen,
            cmd_opts.paperspace,
            cmd_opts.is_cli,
        )

    @staticmethod
    def _lower_preprocess_threshold():
        """Replace "3.7" with "3.0" in trainset_preprocess_pipeline_print.py.

        Shared low-VRAM tweak; previously this snippet was duplicated
        inline inside device_config().
        """
        with open("trainset_preprocess_pipeline_print.py", "r") as f:
            strr = f.read().replace("3.7", "3.0")
        with open("trainset_preprocess_pipeline_print.py", "w") as f:
            f.write(strr)

    def device_config(self) -> tuple:
        """Probe available hardware and derive pipeline chunk sizes.

        Side effects: may set self.device / self.is_half / self.gpu_name /
        self.gpu_mem / self.n_cpu, and may rewrite config files on disk
        (via config_file_change_fp32) when fp16 is not viable.

        Returns:
            tuple: (x_pad, x_query, x_center, x_max).
        """
        if torch.cuda.is_available():
            i_device = int(self.device.split(":")[-1])
            self.gpu_name = torch.cuda.get_device_name(i_device)
            # 16-series/10-series cards and the P40 lack usable fp16;
            # force single precision on them.
            if (
                ("16" in self.gpu_name and "V100" not in self.gpu_name.upper())
                or "P40" in self.gpu_name.upper()
                or "1060" in self.gpu_name
                or "1070" in self.gpu_name
                or "1080" in self.gpu_name
            ):
                print("16系/10系显卡和P40强制单精度")
                self.is_half = False
                config_file_change_fp32()
            else:
                self.gpu_name = None
            # Total VRAM in GiB, rounded up slightly (+0.4 before int()).
            self.gpu_mem = int(
                torch.cuda.get_device_properties(i_device).total_memory
                / 1024
                / 1024
                / 1024
                + 0.4
            )
            if self.gpu_mem <= 4:
                self._lower_preprocess_threshold()
        elif torch.backends.mps.is_available():
            print("没有发现支持的N卡, 使用MPS进行推理")
            self.device = "mps"
            self.is_half = False
            config_file_change_fp32()
        else:
            print("没有发现支持的N卡, 使用CPU进行推理")
            self.device = "cpu"
            self.is_half = False
            config_file_change_fp32()

        if self.n_cpu == 0:
            self.n_cpu = cpu_count()

        if self.is_half:
            # 6G显存配置 (6 GB VRAM profile)
            x_pad = 3
            x_query = 10
            x_center = 60
            x_max = 65
        else:
            # 5G显存配置 (5 GB VRAM profile)
            x_pad = 1
            x_query = 6
            x_center = 38
            x_max = 41

        # Tighter limits again for very small (<= 4 GiB) cards.
        if self.gpu_mem is not None and self.gpu_mem <= 4:
            x_pad = 1
            x_query = 5
            x_center = 30
            x_max = 32

        return x_pad, x_query, x_center, x_max