admin committed on
Commit
9d4dce9
·
1 Parent(s): 2000f4c
Files changed (10) hide show
  1. README.md +2 -2
  2. app.py +32 -36
  3. modules/cmd.py +0 -86
  4. modules/data.py +0 -202
  5. modules/exif.py +0 -170
  6. modules/gif.py +0 -97
  7. modules/rct.py +0 -91
  8. modules/splitter.py +0 -59
  9. modules/tools.py +0 -9
  10. utils.py +15 -32
README.md CHANGED
@@ -1,5 +1,5 @@
1
  ---
2
- title: Online Tools
3
  emoji: 🛠️
4
  colorFrom: green
5
  colorTo: indigo
@@ -8,5 +8,5 @@ sdk_version: 5.22.0
8
  app_file: app.py
9
  pinned: false
10
  license: apache-2.0
11
- short_description: Online tool collection
12
  ---
 
1
  ---
2
+ title: QR Code Generator
3
  emoji: 🛠️
4
  colorFrom: green
5
  colorTo: indigo
 
8
  app_file: app.py
9
  pinned: false
10
  license: apache-2.0
11
+ short_description: Convert text to QR code
12
  ---
app.py CHANGED
@@ -1,22 +1,12 @@
1
  import gradio as gr
2
- from modules.data import data_converter
3
- from modules.exif import clexif
4
- from modules.gif import video2gif
5
- from modules.cmd import cmd_inject
6
- from modules.rct import rct_generator
7
- from modules.splitter import str_splitter
8
- from modules.tools import webtools
9
- from utils import EN_US
10
 
11
  ZH2EN = {
12
- "# 在线工具合集": "# Online Tools Collection",
13
- "数据件转换": "Data Converter",
14
- "图片 EXIF 清理": "Image EXIF Cleaner",
15
- "视频转 GIF 动图": "Video to GIF",
16
- "命令注入测试": "CMD Injector",
17
- "随机对照试验生成": "RCT Generator",
18
- "字符串分割": "String Splitter",
19
- "在线编程辅助工具": "Web Tools",
20
  }
21
 
22
 
@@ -24,28 +14,34 @@ def _L(zh_txt: str):
24
  return ZH2EN[zh_txt] if EN_US else zh_txt
25
 
26
 
27
- if __name__ == "__main__":
28
- with gr.Blocks() as demo:
29
- gr.Markdown(_L("# 在线工具合集"))
30
- with gr.Tab(_L("图片 EXIF 清理")):
31
- clexif()
32
-
33
- with gr.Tab(_L("数据文件转换")):
34
- data_converter()
35
 
36
- with gr.Tab(_L("视频转 GIF 动图")):
37
- video2gif()
 
 
38
 
39
- with gr.Tab(_L("命令注入测试")):
40
- cmd_inject()
41
 
42
- with gr.Tab(_L("随机对照试验生成")):
43
- rct_generator()
44
 
45
- with gr.Tab(_L("字符串分割")):
46
- str_splitter()
47
 
48
- with gr.Tab(_L("在线编程辅助工具")):
49
- webtools()
50
-
51
- demo.launch()
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
+ from utils import download_file, EN_US, API_QR, TMP_DIR
 
 
 
 
 
 
 
3
 
4
# Chinese → English UI-label map. _L() indexes this dict whenever EN_US is
# true, so every Chinese string passed to _L() MUST exist here as a key.
ZH2EN = {
    "二维码输出尺寸": "Image size",
    "输入": "Input text",
    # fix: the input Textbox label calls _L("输入文本"), which was missing
    # from this map and raised KeyError in the English locale.
    "输入文本": "Input text",
    "输出二维码": "QR code",
    "输入文字在线生成二维码": "Enter text to generate a QR code.",
    "状态栏": "Status",
}
11
 
12
 
 
14
  return ZH2EN[zh_txt] if EN_US else zh_txt
15
 
16
 
17
def infer(img_size: int, input_txt: str):
    """Generate a QR code image for ``input_txt`` via the remote QR API.

    Args:
        img_size: Output image edge length in pixels (the image is square).
        input_txt: Text payload to encode into the QR code.

    Returns:
        A ``(status, image_path)`` tuple. On success ``status`` is
        ``"Success"``; on failure it carries the error message and
        ``image_path`` is ``None``.
    """
    from urllib.parse import quote

    status = "Success"
    img = None
    try:
        if (not input_txt) or input_txt == "0":
            raise ValueError("Please input valid text!")

        # fix: URL-encode the payload — raw text containing '&', '#',
        # spaces or non-ASCII characters would otherwise corrupt the
        # query string sent to the API.
        img = download_file(
            f"{API_QR}/?size={img_size}x{img_size}&data={quote(input_txt)}",
            f"{TMP_DIR}/qrcode.jpg",
        )

    except Exception as e:
        status = f"{e}"

    return status, img
 
33
 
 
 
34
 
35
+ if __name__ == "__main__":
36
+ gr.Interface(
37
+ fn=infer,
38
+ inputs=[
39
+ gr.Slider(35, 1000, 217, label=_L("二维码输出尺寸")),
40
+ gr.Textbox(label=_L("输入文本"), placeholder=_L("输入文字在线生成二维码")),
41
+ ],
42
+ outputs=[
43
+ gr.Textbox(label=_L("状态栏"), show_copy_button=True),
44
+ gr.Image(label=_L("输出二维码"), show_share_button=False),
45
+ ],
46
+ flagging_mode="never",
47
+ ).launch()
modules/cmd.py DELETED
@@ -1,86 +0,0 @@
1
- import os
2
- import time
3
- import threading
4
- import subprocess
5
- import gradio as gr
6
- from utils import EN_US
7
-
8
- ZH2EN = {
9
- "状态栏": "Status",
10
- "执行结果": "Command output",
11
- "命令执行测试工具": "Command executor",
12
- "输入一个命令, 点击查看其执行结果": "Enter a command, and click to see its result",
13
- "执行超时时间": "Execution timeout",
14
- }
15
-
16
-
17
- def _L(zh_txt: str):
18
- return ZH2EN[zh_txt] if EN_US else zh_txt
19
-
20
-
21
- class Inject:
22
- def __init__(self):
23
- self.output = ""
24
- self.status = "Success"
25
- self.thread: threading.Thread = None
26
- self.timeout = 10
27
- self.delay = 0.1
28
-
29
- def run(self, cmd: str):
30
- try:
31
- self.output = subprocess.check_output(
32
- cmd,
33
- shell=True,
34
- stderr=subprocess.STDOUT,
35
- text=True,
36
- )
37
-
38
- except Exception as e:
39
- self.status = f" {e} "
40
- self.output = ""
41
-
42
- def infer(self, cmd: str):
43
- self.status = "Success"
44
- self.output = ""
45
- try:
46
- if self.thread and self.thread.is_alive():
47
- self.thread.join(timeout=0)
48
-
49
- self.thread = threading.Thread(
50
- target=self.run,
51
- args=(cmd,),
52
- daemon=True,
53
- )
54
- self.thread.start()
55
- delay = 0
56
- while self.thread and self.thread.is_alive():
57
- delay += self.delay
58
- time.sleep(self.delay)
59
- if delay > self.timeout:
60
- self.thread.join(timeout=0)
61
- self.status = "Killed"
62
- self.output = _L("执行超时时间") + f": {self.timeout}s"
63
- break
64
-
65
- except Exception as e:
66
- self.status = f" {e} "
67
- self.output = ""
68
-
69
- return self.status, self.output
70
-
71
-
72
- def cmd_inject():
73
- inject = Inject()
74
- return gr.Interface(
75
- fn=inject.infer,
76
- inputs=gr.Textbox(
77
- label=_L("命令执行测试工具"),
78
- value="dir" if os.name == "nt" else "ls /",
79
- placeholder=_L("输入一个命令, 点击查看其执行结果"),
80
- ),
81
- outputs=[
82
- gr.Textbox(label=_L("状态栏"), show_copy_button=True),
83
- gr.TextArea(label=_L("执行结果"), show_copy_button=True),
84
- ],
85
- flagging_mode="never",
86
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
modules/data.py DELETED
@@ -1,202 +0,0 @@
1
- import csv
2
- import json
3
- import gradio as gr
4
- import pandas as pd
5
- from utils import clean_dir, TMP_DIR, EN_US
6
-
7
-
8
- MODE = {"from": "jsonl", "to": "csv"}
9
- ZH2EN = {
10
- "模式": "Mode",
11
- "上传原数据": "Upload input file",
12
- "转换": "Convert",
13
- "下载转换数据": "Download output file",
14
- "数据预览": "Data viewer",
15
- "支持的 JSON 格式": "Supported JSON format",
16
- "支持的 JSON Lines 格式": "Supported jsonl format",
17
- "支持的 CSV 格式": "Supported CSV format",
18
- "状态栏": "Status",
19
- }
20
-
21
-
22
- def _L(zh_txt: str):
23
- return ZH2EN[zh_txt] if EN_US else zh_txt
24
-
25
-
26
- def encoder_json(file_path: str):
27
- with open(file_path, "r", encoding="utf-8") as file:
28
- data_list = list(json.load(file))
29
-
30
- return data_list
31
-
32
-
33
- def encoder_jsonl(file_path: str):
34
- data_list = []
35
- with open(file_path, "r", encoding="utf-8") as file:
36
- for line in file:
37
- # 加载每一行的 JSON 数据
38
- json_data = json.loads(line.strip())
39
- data_list.append(json_data)
40
-
41
- return data_list
42
-
43
-
44
- def encoder_csv(file_path: str):
45
- data_list = []
46
- try:
47
- with open(file_path, "r", encoding="utf-8") as file:
48
- csv_reader = csv.DictReader(file)
49
- for row in csv_reader:
50
- data_list.append(dict(row))
51
-
52
- except UnicodeDecodeError:
53
- with open(file_path, "r", encoding="GBK") as file:
54
- csv_reader = csv.DictReader(file)
55
- for row in csv_reader:
56
- data_list.append(dict(row))
57
-
58
- return data_list
59
-
60
-
61
- def decoder_json(data_list: list, file_path: str):
62
- if data_list:
63
- with open(file_path, "w", encoding="utf-8") as file:
64
- # 将整个列表转换成 JSON 格式并写入文件
65
- json.dump(data_list, file, ensure_ascii=False, indent=4)
66
-
67
- return file_path
68
-
69
-
70
- def decoder_csv(data_list: list, file_path: str):
71
- if data_list: # 提取第一个字典的键作为表头
72
- header = list(data_list[0].keys())
73
- with open(file_path, "w", newline="", encoding="utf-8") as file:
74
- csv_writer = csv.writer(file) # 写入表头
75
- csv_writer.writerow(header) # 逐项写入字典的值
76
- for item in data_list:
77
- csv_writer.writerow([item[key] for key in header])
78
-
79
- return file_path
80
-
81
-
82
- def decoder_jsonl(data_list: list, file_path: str):
83
- if data_list:
84
- with open(file_path, "w", encoding="utf-8") as file:
85
- for data in data_list:
86
- # 将每个 JSON 对象转换成字符串并写入文件,每行一个对象
87
- json_line = json.dumps(data, ensure_ascii=False)
88
- file.write(f"{json_line}\n")
89
-
90
- return file_path
91
-
92
-
93
- def change_mode(input: str):
94
- global MODE
95
- affix = input.split(" ")
96
- if affix[1] == "→":
97
- MODE["from"] = affix[0]
98
- MODE["to"] = affix[2]
99
-
100
- else:
101
- MODE["from"] = affix[2]
102
- MODE["to"] = affix[0]
103
-
104
-
105
- # outer func
106
- def infer(input_file: str, cache=f"{TMP_DIR}/data"):
107
- status = "Success"
108
- output_file = previews = None
109
- try:
110
- clean_dir(cache)
111
- src_fmt = MODE["from"]
112
- dst_fmt = MODE["to"]
113
- data_list = eval(f"encoder_{src_fmt}")(input_file)
114
- output_file = eval(f"decoder_{dst_fmt}")(data_list, f"{cache}/output.{dst_fmt}")
115
- previews = pd.DataFrame(data_list)
116
-
117
- except Exception as e:
118
- status = f"{e}"
119
-
120
- return status, output_file, previews
121
-
122
-
123
- def data_converter(tab_cfgs=["jsonl ⇆ csv", "json ⇆ csv", "json ⇆ jsonl"]):
124
- with gr.Blocks() as data:
125
- for item in tab_cfgs:
126
- types = item.split(" ⇆ ")
127
- with gr.Tab(item) as tab:
128
- with gr.Row():
129
- with gr.Column():
130
- option = gr.Dropdown(
131
- choices=[
132
- f"{types[0]} → {types[1]}",
133
- f"{types[0]} ← {types[1]}",
134
- ],
135
- label=_L("模式"),
136
- value=f"{types[0]} → {types[1]}",
137
- )
138
- input_file = gr.File(
139
- type="filepath",
140
- label=_L("上传原数据"),
141
- file_types=[f".{types[0]}", f".{types[1]}"],
142
- )
143
- convert_btn = gr.Button(_L("转换"))
144
-
145
- with gr.Column():
146
- status_bar = gr.Textbox(
147
- label=_L("状态栏"),
148
- show_copy_button=True,
149
- )
150
- output_file = gr.File(type="filepath", label=_L("下载转换数据"))
151
- data_viewer = gr.Dataframe(label=_L("数据预览"))
152
-
153
- option.change(change_mode, inputs=option)
154
- tab.select(change_mode, inputs=option)
155
- convert_btn.click(
156
- infer,
157
- inputs=input_file,
158
- outputs=[status_bar, output_file, data_viewer],
159
- )
160
-
161
- with gr.Row():
162
- with gr.Column():
163
- gr.Markdown(
164
- f"""
165
- ## {_L('支持的 JSON Lines 格式')}
166
- ```
167
- {{"key1": "val11", "key2": "val12", ...}}
168
- {{"key1": "val21", "key2": "val22", ...}}
169
- ...
170
- ```
171
- ## {_L('支持的 CSV 格式')}
172
- ```
173
- key1, key2, ...
174
- val11, val12, ...
175
- val21, val22, ...
176
- ...
177
- ```
178
- """
179
- )
180
-
181
- with gr.Column():
182
- gr.Markdown(
183
- f"""
184
- ## {_L('支持的 JSON 格式')}
185
- ```
186
- [
187
- {{
188
- "key1": "val11",
189
- "key2": "val12",
190
- ...
191
- }},
192
- {{
193
- "key1": "val21",
194
- "key2": "val22",
195
- ...
196
- }},
197
- ...
198
- ]
199
- ```"""
200
- )
201
-
202
- return data
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
modules/exif.py DELETED
@@ -1,170 +0,0 @@
1
- import os
2
- import imghdr
3
- import hashlib
4
- import exifread
5
- import gradio as gr
6
- import pandas as pd
7
- from PIL import Image
8
- from utils import clean_dir, compress, mk_dir, unzip, TMP_DIR, EN_US
9
-
10
- ZH2EN = {
11
- "单图片处理": "Process single picture",
12
- "上传图片": "Upload picture",
13
- "导出原格式": "Export original format",
14
- "下载清理 EXIF 后的图片": "Download cleaned picture",
15
- "批量处理": "Batch processor",
16
- "上传包含多图片的 zip 压缩包 (确保上传进度至 100% 后再提交)": "Upload pictures zip (please ensure the zip is completely uploaded before clicking submit)",
17
- "导出原格式": "Export original format",
18
- "下载清理 EXIF 后的多图片压缩包": "Download cleaned pictures",
19
- "EXIF 列表": "EXIF list",
20
- "状态栏": "Status",
21
- }
22
-
23
-
24
- def _L(zh_txt: str):
25
- return ZH2EN[zh_txt] if EN_US else zh_txt
26
-
27
-
28
- def get_exif(origin_file_path):
29
- with open(origin_file_path, "rb") as image_file:
30
- tags = exifread.process_file(image_file)
31
-
32
- output = ""
33
- for key in tags.keys():
34
- value = str(tags[key])
35
- output += "{0}:{1}\n".format(key, value)
36
-
37
- return output
38
-
39
-
40
- def clear_exif(img_path: str, cache: str, img_mode=None, outdir=""):
41
- save_path = f"{cache}/{outdir}output." + img_path.split(".")[-1]
42
- img = Image.open(img_path)
43
- data = list(img.getdata())
44
- if img_mode:
45
- save_path = f"{cache}/{outdir}{hashlib.md5(img_path.encode()).hexdigest()}.jpg"
46
- else:
47
- img_mode = img.mode
48
-
49
- img_without_exif = Image.new(img_mode, img.size)
50
- img_without_exif.putdata(data)
51
- img_without_exif.save(save_path)
52
- return save_path
53
-
54
-
55
- def find_images(dir_path: str):
56
- found_images = []
57
- for root, _, files in os.walk(dir_path):
58
- for file in files:
59
- fpath = os.path.join(root, file).replace("\\", "/")
60
- if imghdr.what(fpath) != None:
61
- found_images.append(fpath)
62
-
63
- return found_images
64
-
65
-
66
- # outer func
67
- def infer(img_path: str, keep_ext: bool, cache=f"{TMP_DIR}/exif"):
68
- status = "Success"
69
- out_img = out_exif = None
70
- try:
71
- if not img_path or imghdr.what(img_path) == None:
72
- raise ValueError("请输入图片!")
73
-
74
- clean_dir(cache)
75
- img_mode = "RGB" if not keep_ext else None
76
- out_img = clear_exif(img_path, cache, img_mode)
77
- out_exif = get_exif(img_path)
78
-
79
- except Exception as e:
80
- status = f"{e}"
81
-
82
- return status, out_img, out_exif
83
-
84
-
85
- # outer func
86
- def batch_infer(imgs_zip: str, keep_ext: bool, cache=f"{TMP_DIR}/exif"):
87
- status = "Success"
88
- out_images = out_exifs = None
89
- try:
90
- if not imgs_zip:
91
- raise ValueError("Please upload pictures zip!")
92
-
93
- clean_dir(cache)
94
- mk_dir(f"{cache}/outputs")
95
- extract_to = f"{cache}/inputs"
96
- unzip(imgs_zip, extract_to)
97
- imgs = find_images(extract_to)
98
- img_mode = "RGB" if not keep_ext else None
99
- exifs = []
100
- for img in imgs:
101
- clear_exif(img, cache, img_mode, "outputs/")
102
- exifs.append({"filename": os.path.basename(img), "exif": get_exif(img)})
103
-
104
- if not exifs:
105
- raise ValueError("No picture in the zip")
106
-
107
- out_images = f"{cache}/outputs.zip"
108
- compress(f"{cache}/outputs", out_images)
109
- out_exifs = pd.DataFrame(exifs)
110
-
111
- except Exception as e:
112
- status = f"{e}"
113
-
114
- return status, out_images, out_exifs
115
-
116
-
117
- def clexif():
118
- with gr.Blocks() as iface:
119
- with gr.Tab(_L("单图片处理")):
120
- gr.Interface(
121
- fn=infer,
122
- inputs=[
123
- gr.File(
124
- label=_L("上传图片"),
125
- file_types=["image"],
126
- ),
127
- gr.Checkbox(
128
- label=_L("导出原格式"),
129
- value=False,
130
- ),
131
- ],
132
- outputs=[
133
- gr.Textbox(label=_L("状态栏"), show_copy_button=True),
134
- gr.Image(
135
- label=_L("下载清理 EXIF 后的图片"),
136
- type="filepath",
137
- show_share_button=False,
138
- ),
139
- gr.Textbox(label="EXIF", show_copy_button=True),
140
- ],
141
- flagging_mode="never",
142
- )
143
-
144
- with gr.Tab(_L("批量处理")):
145
- gr.Interface(
146
- fn=batch_infer,
147
- inputs=[
148
- gr.File(
149
- label=_L(
150
- "上传包含多图片的 zip 压缩包 (确保上传进度至 100% 后再提交)"
151
- ),
152
- file_types=[".zip"],
153
- ),
154
- gr.Checkbox(
155
- label=_L("导出原格式"),
156
- value=False,
157
- ),
158
- ],
159
- outputs=[
160
- gr.Textbox(label=_L("状态栏"), show_copy_button=True),
161
- gr.File(
162
- label=_L("下载清理 EXIF 后的多图片压缩包"),
163
- type="filepath",
164
- ),
165
- gr.Dataframe(label=_L("EXIF 列表")),
166
- ],
167
- flagging_mode="never",
168
- )
169
-
170
- return iface
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
modules/gif.py DELETED
@@ -1,97 +0,0 @@
1
- import os
2
- import math
3
- import gradio as gr
4
- from PIL import Image, ImageSequence
5
- from moviepy.editor import VideoFileClip
6
- from utils import clean_dir, TMP_DIR, EN_US
7
-
8
- ZH2EN = {
9
- "上传视频 (请确保上传完整后再点击提交,若时长大于五秒可先在线裁剪)": "Upload video (please ensure the video is completely uploaded before clicking submit, you can crop it online to less than 5s)",
10
- "倍速": "Speed",
11
- "状态栏": "Status",
12
- "文件名": "Filename",
13
- "下载动图": "Download GIF",
14
- }
15
-
16
-
17
- def _L(zh_txt: str):
18
- return ZH2EN[zh_txt] if EN_US else zh_txt
19
-
20
-
21
- def get_frame_dur(gif: Image):
22
- # 获取 GIF 图像中第一帧的 duration
23
- dur = gif.info.get("duration", 100)
24
- # 返回每一帧的 duration
25
- return [frame.info.get("duration", dur) for frame in ImageSequence.Iterator(gif)]
26
-
27
-
28
- def resize_gif(target_width: int, target_height: int, input_gif, output_gif):
29
- gif = Image.open(input_gif)
30
- modified_frames = []
31
- for frame in ImageSequence.Iterator(gif):
32
- resized_frame = frame.resize((target_width, target_height), Image.LANCZOS)
33
- modified_frames.append(resized_frame)
34
-
35
- frame_durations = get_frame_dur(gif)
36
- # 将修改后的帧作为新的 GIF 保存
37
- modified_frames[0].save(
38
- output_gif,
39
- format="GIF",
40
- append_images=modified_frames[1:],
41
- save_all=True,
42
- duration=frame_durations,
43
- loop=0,
44
- )
45
-
46
- return output_gif
47
-
48
-
49
- # outer func
50
- def infer(video_path: str, speed: float, target_w=640, cache=f"{TMP_DIR}/gif"):
51
- status = "Success"
52
- gif_name = gif_out = None
53
- try:
54
- clean_dir(cache)
55
- with VideoFileClip(video_path, audio_fps=16000) as clip:
56
- if clip.duration > 5:
57
- raise ValueError("上传的视频过长")
58
-
59
- clip.speedx(speed).to_gif(f"{cache}/input.gif", fps=12)
60
- w, h = clip.size
61
-
62
- gif_in = f"{cache}/input.gif"
63
- target_h = math.ceil(target_w * h / w)
64
- gif_name = os.path.basename(video_path)
65
- gif_out = resize_gif(target_w, target_h, gif_in, f"{cache}/output.gif")
66
-
67
- except Exception as e:
68
- status = f"{e}"
69
-
70
- return status, gif_name, gif_out
71
-
72
-
73
- def video2gif():
74
- example = (
75
- "https://www.modelscope.cn/studio/Genius-Society/online_tools/resolve/master"
76
- if EN_US
77
- else "."
78
- )
79
- return gr.Interface(
80
- fn=infer,
81
- inputs=[
82
- gr.Video(
83
- label=_L(
84
- "上传视频 (请确保上传完整后再点击提交,若时长大于五秒可先在线裁剪)"
85
- )
86
- ),
87
- gr.Slider(label=_L("倍速"), minimum=0.5, maximum=2.0, step=0.25, value=1.0),
88
- ],
89
- outputs=[
90
- gr.Textbox(label=_L("状态栏"), show_copy_button=True),
91
- gr.Textbox(label=_L("文件名"), show_copy_button=True),
92
- gr.Image(label=_L("下载动图"), type="filepath", show_share_button=False),
93
- ],
94
- flagging_mode="never",
95
- examples=[[f"{example}/examples/herta.mp4", 2]],
96
- cache_examples=False,
97
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
modules/rct.py DELETED
@@ -1,91 +0,0 @@
1
- import csv
2
- import random
3
- import pandas as pd
4
- import gradio as gr
5
- from utils import clean_dir, TMP_DIR, EN_US
6
-
7
- ZH2EN = {
8
- "输入参与者数量": "Number of participants",
9
- "输入分组比率 (格式为用:隔开的数字,生成随机分组数据)": "Grouping ratio (numbers separated by : to generate randomized controlled trial)",
10
- "状态栏": "Status",
11
- "下载随机分组数据 CSV": "Download data CSV",
12
- "随机分组数据预览": "Data preview",
13
- }
14
-
15
-
16
- def _L(zh_txt: str):
17
- return ZH2EN[zh_txt] if EN_US else zh_txt
18
-
19
-
20
- def list_to_csv(list_of_dicts: list, filename: str):
21
- keys = dict(list_of_dicts[0]).keys()
22
- # 将列表中的字典写入 CSV 文件
23
- with open(filename, "w", newline="", encoding="utf-8") as csvfile:
24
- writer = csv.DictWriter(csvfile, fieldnames=keys)
25
- writer.writeheader()
26
- for data in list_of_dicts:
27
- writer.writerow(data)
28
-
29
-
30
- def random_allocate(participants: int, ratio: list, out_csv: str):
31
- splits = [0]
32
- total = sum(ratio)
33
- for i, r in enumerate(ratio):
34
- splits.append(splits[i] + int(1.0 * r / total * participants))
35
-
36
- splits[-1] = participants
37
- partist = list(range(1, participants + 1))
38
- random.shuffle(partist)
39
- allocation = []
40
- groups = len(ratio)
41
- for i in range(groups):
42
- start = splits[i]
43
- end = splits[i + 1]
44
- for participant in partist[start:end]:
45
- allocation.append({"id": participant, "group": i + 1})
46
-
47
- sorted_data = sorted(allocation, key=lambda x: x["id"])
48
- list_to_csv(sorted_data, out_csv)
49
- return out_csv, pd.DataFrame(sorted_data)
50
-
51
-
52
- # outer func
53
- def infer(participants: float, ratios: str, cache=f"{TMP_DIR}/rct"):
54
- ratio = []
55
- status = "Success"
56
- out_csv = previews = None
57
- try:
58
- ratio_list = ratios.split(":")
59
- clean_dir(cache)
60
- for r in ratio_list:
61
- current_ratio = float(r.strip())
62
- if current_ratio > 0:
63
- ratio.append(current_ratio)
64
-
65
- out_csv, previews = random_allocate(
66
- int(participants), ratio, f"{cache}/output.csv"
67
- )
68
-
69
- except Exception as e:
70
- status = f"{e}"
71
-
72
- return status, out_csv, previews
73
-
74
-
75
- def rct_generator():
76
- return gr.Interface(
77
- fn=infer,
78
- inputs=[
79
- gr.Number(label=_L("输入参与者数量"), value=10),
80
- gr.Textbox(
81
- label=_L("输入分组比率 (格式为用:隔开的数字,生成随机分组数据)"),
82
- value="8:1:1",
83
- ),
84
- ],
85
- outputs=[
86
- gr.Textbox(label=_L("状态栏"), show_copy_button=True),
87
- gr.File(label=_L("下载随机分组数据 CSV")),
88
- gr.Dataframe(label=_L("随机分组数据预览")),
89
- ],
90
- flagging_mode="never",
91
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
modules/splitter.py DELETED
@@ -1,59 +0,0 @@
1
- import math
2
- import gradio as gr
3
- from utils import EN_US
4
-
5
- ZH2EN = {
6
- "待分割字符串": "String to be split",
7
- "分割步长": "Split step",
8
- "状态栏": "Status",
9
- "分割结果": "Split result",
10
- }
11
-
12
-
13
- def _L(zh_txt: str):
14
- return ZH2EN[zh_txt] if EN_US else zh_txt
15
-
16
-
17
- def infer(cookie: str, step: int):
18
- status = "Success"
19
- output = ""
20
- try:
21
- cookie = cookie.strip()
22
- if not cookie:
23
- raise ValueError("请输入 cookie !")
24
-
25
- size = len(cookie)
26
- count = math.ceil(size / step)
27
- for i in range(count):
28
- output += f"""
29
- ## {i + 1}
30
- ```txt
31
- {cookie[i * step : min((i + 1) * step, size)]}
32
- ```
33
- """
34
-
35
- except Exception as e:
36
- status = f"{e}"
37
-
38
- return status, output
39
-
40
-
41
- def str_splitter():
42
- return gr.Interface(
43
- fn=infer,
44
- inputs=[
45
- gr.TextArea(label=_L("待分割字符串")),
46
- gr.Slider(
47
- label=_L("分割步长"),
48
- minimum=1,
49
- maximum=255959,
50
- step=1,
51
- value=1024,
52
- ),
53
- ],
54
- outputs=[
55
- gr.Textbox(label=_L("状态栏"), show_copy_button=True),
56
- gr.Markdown(label=_L("分割结果"), container=True, show_copy_button=True),
57
- ],
58
- flagging_mode="never",
59
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
modules/tools.py DELETED
@@ -1,9 +0,0 @@
1
- import gradio as gr
2
- from utils import EN_US
3
-
4
-
5
- def webtools():
6
- domain = "static.hf.space" if EN_US else "ms.show"
7
- return gr.HTML(
8
- f"<iframe src='https://genius-society-web-tools.{domain}' width='100%' style='aspect-ratio: 16 / 9;'></iframe>"
9
- )
 
 
 
 
 
 
 
 
 
 
utils.py CHANGED
@@ -1,17 +1,15 @@
1
  import os
2
  import shutil
3
- import zipfile
4
 
5
  EN_US = os.getenv("LANG") != "zh_CN.UTF-8"
6
- TMP_DIR = "./__pycache__"
7
- HEADER = {
8
- "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36 Edg/132.0.0.0",
9
- }
10
 
11
 
12
- def mk_dir(dir_path: str):
13
- if not os.path.exists(dir_path):
14
- os.makedirs(dir_path)
15
 
16
 
17
  def clean_dir(dir_path: str):
@@ -21,27 +19,12 @@ def clean_dir(dir_path: str):
21
  os.makedirs(dir_path)
22
 
23
 
24
- def unzip(zip_path: str, extract_to: str):
25
- mk_dir(extract_to)
26
- # 打开ZIP文件
27
- with zipfile.ZipFile(zip_path, "r") as zip_ref:
28
- # 解压文件
29
- zip_ref.extractall(extract_to)
30
-
31
-
32
- def compress(folder_path: str, zip_file: str):
33
- # 确保文件夹存在
34
- if not os.path.exists(folder_path):
35
- raise ValueError(f"错误: 文件夹 '{folder_path}' 不存在")
36
- # 打开 ZIP 文件,使用 'w' 模式表示写入
37
- with zipfile.ZipFile(zip_file, "w", zipfile.ZIP_DEFLATED) as zipf:
38
- # 遍历文件夹中的文件和子文件夹
39
- for root, _, files in os.walk(folder_path):
40
- for file in files:
41
- file_path = os.path.join(root, file)
42
- # 计算相对路径,保留文件夹的根目录
43
- relative_path = os.path.relpath(file_path, folder_path)
44
- zipf.write(
45
- file_path,
46
- arcname=os.path.join(os.path.basename(folder_path), relative_path),
47
- )
 
1
  import os
2
  import shutil
3
+ import requests
4
 
5
# UI language switch: any locale other than zh_CN renders English labels.
EN_US = os.getenv("LANG") != "zh_CN.UTF-8"

# Base URL of the external QR-code generation API; must be supplied via
# the "api_qr" environment variable — the app cannot run without it.
API_QR = os.getenv("api_qr")
if not API_QR:
    print("请检查环境变量")
    # fix: exit with a non-zero status so supervisors/CI see the fatal
    # misconfiguration (bare exit() reports success, status 0).
    exit(1)


# Scratch directory for downloaded files.
TMP_DIR = "./__pycache__"
 
 
13
 
14
 
15
  def clean_dir(dir_path: str):
 
19
  os.makedirs(dir_path)
20
 
21
 
22
def download_file(url, local_filename, timeout=60):
    """Download ``url`` to ``local_filename`` and return the local path.

    The target's parent directory is wiped and recreated first
    (``clean_dir``), so only the freshly downloaded file remains.

    Args:
        url: Remote resource to fetch.
        local_filename: Destination path for the downloaded bytes.
        timeout: Connect/read timeout in seconds (new, backward-compatible).

    Raises:
        requests.HTTPError: On a non-2xx response.
    """
    clean_dir(os.path.dirname(local_filename))
    # fix: requests has NO default timeout, so a stalled server would hang
    # this call forever; also use the Response as a context manager so the
    # connection is released deterministically.
    with requests.get(url, stream=True, timeout=timeout) as response:
        response.raise_for_status()
        with open(local_filename, "wb") as f:
            for chunk in response.iter_content(chunk_size=8192):
                f.write(chunk)

    return local_filename