Shi-Jie committed on
Commit
adc0e4e
·
1 Parent(s): f44425f

update files

Browse files
Files changed (12) hide show
  1. .gitattributes +1 -0
  2. .gitignore +15 -0
  3. .pre-commit-config.yaml +7 -0
  4. README.md +2 -2
  5. app.py +201 -0
  6. config.yaml +31 -0
  7. demo-neg.jpg +3 -0
  8. demo-pos.jpg +3 -0
  9. demo_webui.py +200 -0
  10. model/api.yaml +45 -0
  11. model/hf.yaml +25 -0
  12. pyproject.toml +40 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ *.jpg filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Environments
2
+ .env
3
+ .venv
4
+ env/
5
+ venv/
6
+ ENV/
7
+ env.bak/
8
+ venv.bak/
9
+ pyvenv.cfg
10
+ share/
11
+ bin/
12
+
13
+ # PyCharm files
14
+ .idea/
15
+ .cursorignore
.pre-commit-config.yaml ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ repos:
2
+ - repo: https://github.com/astral-sh/ruff-pre-commit
3
+ rev: v0.9.4
4
+ hooks:
5
+ - id: ruff
6
+ args: [ --fix ]
7
+ - id: ruff-format
README.md CHANGED
@@ -9,5 +9,5 @@ app_file: app.py
9
  pinned: false
10
  license: mit
11
  ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
9
  pinned: false
10
  license: mit
11
  ---
12
+
13
+ # Multi-modality Misogyny Moderation For Vividhata
app.py ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import base64
2
+ from io import BytesIO
3
+ from pathlib import Path
4
+ from urllib.parse import urlparse
5
+
6
+ import dotenv
7
+ import gradio as gr
8
+ import requests
9
+ from clients import get_client_module
10
+ from hf_datasets import dataset_rootdir
11
+ from omegaconf import DictConfig, OmegaConf
12
+ from PIL import Image
13
+ from prompts import get_prompt_module
14
+
15
+
16
+ dotenv.load_dotenv()
17
+
18
+ prompt_versions = [d.stem for d in Path("./prompts").iterdir() if d.is_file() and not d.name.startswith("_")]
19
+
20
+
21
class ConfigManager:
    """Loads and caches per-client model configurations from the YAML files."""

    def __init__(self):
        # Mapping of client name -> that client's OmegaConf settings.
        self.configs: dict = {}
        # Top-level keys that describe the run itself, not a client.
        self.ignore_keys = ["type", "client_name", "model_name"]

        # Populate the cache once on construction.
        self.update()

    def update(self):
        """Reload configs"""
        # Drop any cached state before re-reading from disk.
        self.configs.clear()

        # API-based clients: every remaining top-level key is its own client.
        api_cfg = OmegaConf.load("./model/api.yaml")
        self.configs.update({key: api_cfg[key] for key in api_cfg if key not in self.ignore_keys})

        # HF-based models are grouped under a single "huggingface" client.
        hf_cfg = OmegaConf.load("./model/hf.yaml")
        filtered = {key: hf_cfg[key] for key in hf_cfg if key not in self.ignore_keys}
        self.configs.update({"huggingface": DictConfig(filtered)})

    def clients(self):
        """Display all available clients"""
        return list(self.configs)

    def models(self, client=None):
        """Return the model names available for *client* (first client by default)."""
        if client is None:
            client = self.clients()[0]
        return list(self.configs[client].available_models)
51
+
52
+
53
+ config_manager = ConfigManager()
54
+
55
+
56
def link_client_and_model(client, model):  # noqa
    """Refresh the model dropdown to list the chosen client's models.

    ``model`` is unused but kept because Gradio wires it in as an input.
    """
    choices = config_manager.models(client)
    return gr.Dropdown(choices=choices, value=choices[0])
59
+
60
+
61
def display_prompt(prompt_version):
    """Return the human-readable description of the selected prompt version."""
    return get_prompt_module(prompt_version).description()
65
+
66
+
67
def encode_image(image):
    """Serialize a PIL image to a base64-encoded PNG string (no data-URI prefix)."""
    with BytesIO() as buffer:
        image.save(buffer, format="PNG")
        payload = buffer.getvalue()
    return base64.b64encode(payload).decode("utf-8")
71
+
72
+
73
def load_image(image_url_or_path, timeout=None):
    """Load a PIL image from an HTTP(S) URL, a local file path, or a base64 string.

    Args:
        image_url_or_path: A URL, a filesystem path, a raw base64 payload, or a
            ``data:image/...;base64,`` URI.
        timeout: Optional timeout in seconds for the HTTP request.

    Returns:
        The decoded ``PIL.Image.Image``.

    Raises:
        gr.Error: If the input cannot be interpreted as an image source.
    """
    result = urlparse(image_url_or_path)
    if result.scheme in ("http", "https") and result.netloc and result.path:
        response = requests.get(image_url_or_path, timeout=timeout)
        # Fail fast on HTTP errors instead of handing an error page's bytes to PIL.
        response.raise_for_status()
        image = Image.open(BytesIO(response.content))

    elif Path(image_url_or_path).is_file():
        image = Image.open(image_url_or_path)
    else:
        # Strip the "data:image/...;base64," prefix if present.
        if image_url_or_path.startswith("data:image/"):
            image_url_or_path = image_url_or_path.split(",")[1]

        # Try to load as base64
        try:
            base64_image = base64.decodebytes(image_url_or_path.encode())
            image = Image.open(BytesIO(base64_image))

        except Exception as e:
            # Chain the cause so the underlying decode failure is not lost.
            raise gr.Error(
                "Incorrect image source. Must be a valid URL starting with `http://` or `https://`, "
                "a valid path to an image file, or a base64 encoded string."
            ) from e
    return image
95
+
96
+
97
def llm_analyse(client, model, api_key, image, prompt):
    """Run the selected client/model on *image* with prompt version *prompt*.

    Args:
        client: Client name as listed by ``config_manager.clients()``.
        model: Model identifier for that client.
        api_key: API key; empty string means "use the client's default key".
        image: PIL image to analyse.
        prompt: Prompt version name resolvable by ``get_prompt_module``.

    Returns:
        The model's generated text.

    Raises:
        gr.Error: If prompt/client resolution or generation fails.
    """
    try:
        prompt_module = get_prompt_module(prompt)
        client_module = get_client_module(client)

        base64_image = f"data:image/png;base64,{encode_image(image)}"

        if api_key == "":
            api_key = None

        result = client_module.sync_generate(base64_image, prompt_module.messages_encoder, model, api_key=api_key)
        return result

    except Exception as e:
        # Raise (not return) so Gradio surfaces this as an error popup instead of
        # rendering the gr.Error object's repr into the output textbox.
        raise gr.Error(f"Error processing image: {e}") from e
112
+
113
+
114
# ------------------------------ WebUI layout ------------------------------ #
# NOTE(review): the row/column nesting below is reconstructed from a flattened
# source — confirm it matches the intended layout.
with gr.Blocks(
    theme=gr.themes.Default(primary_hue="orange"),
    css="""
    #app-container { max-width: 1400px; margin: auto; padding: 10px; }
    #title { text-align: center; margin-bottom: 10px; font-size: 24px; }
    #groq-badge { text-align: center; margin-top: 10px; }
    .gr-button { border-radius: 15px; }
    .gr-input, .gr-box { border-radius: 10px; }
    .gr-form { gap: 5px; }
    .gr-block.gr-box { padding: 10px; }
    .gr-paddle { height: auto; }
    """,
) as demo:
    gr.Markdown("# Image Moderation WebUI", elem_id="title")

    # --------------- Client and Model Selection Block --------------- #
    with gr.Row(equal_height=True):
        # Left column: prompt/client/model/key selection and image sources.
        with gr.Column(scale=3):
            prompt_version_input = gr.Dropdown(
                prompt_versions,
                value="-- Please Select --",
                allow_custom_value=True,
                label="Choose Prompt:",
            )

            client_input = gr.Dropdown(
                config_manager.clients(), label="Choose Client:", info="HuggingFace Requires a GPU"
            )

            model_input = gr.Dropdown(config_manager.models(), label="Choose Model:")

            api_input = gr.Textbox(
                type="password",
                label="API Key:",
                info="Leave this field blank to use the default key, or if you are using HuggingFace",
            )

            image_input = gr.Image(type="pil", label="Upload Image:", height=300, sources=["upload"])
            url_input = gr.Textbox(
                label="or Paste Image URL, Local File Path, or Base64 String:",
                info="Press Enter to load the image",
                lines=1,
            )

            with gr.Row():
                with gr.Column(scale=1, min_width=160):
                    pos_button = gr.Button("👍 Positive Demo")
                with gr.Column(scale=1, min_width=160):
                    neg_button = gr.Button("👎 Negative Demo")

        # Right column: prompt text preview and model output.
        with gr.Column(scale=5):
            prompt_text_input = gr.Textbox(label="or Paste Prompt Here:", lines=18)
            model_output = gr.Textbox(label="Model Output:", lines=18)

            with gr.Row():
                with gr.Column(scale=1, min_width=120):
                    analyze_button = gr.Button("🚀 Analyze Image", variant="primary")
                with gr.Column(scale=1, min_width=120):
                    clean_button = gr.Button("🧹 Clean Output", variant="primary")

    # Changing the client repopulates the model dropdown.
    client_input.change(fn=link_client_and_model, inputs=[client_input, model_input], outputs=model_input)

    # Selecting a prompt version shows its description in the prompt box.
    prompt_version_input.input(fn=display_prompt, inputs=prompt_version_input, outputs=prompt_text_input)

    # Clear the output textbox.
    clean_button.click(fn=lambda: gr.Textbox(value=""), inputs=None, outputs=model_output)

    # Pressing Enter in the URL box loads the image into the image widget.
    url_input.submit(fn=load_image, inputs=url_input, outputs=image_input)

    # Demo buttons load bundled positive/negative examples from the dataset dir.
    pos_button.click(
        fn=lambda: load_image(Path(dataset_rootdir, "semeval2022/demo-pos.jpg").as_posix()),
        inputs=None,
        outputs=image_input,
    )
    neg_button.click(
        fn=lambda: load_image(Path(dataset_rootdir, "semeval2022/demo-neg.jpg").as_posix()),
        inputs=None,
        outputs=image_input,
    )

    # ------------------------- Image Analysis Block ------------------------- #
    analyze_button.click(
        fn=llm_analyse,
        inputs=[client_input, model_input, api_input, image_input, prompt_version_input],
        outputs=model_output,
    )


demo.launch(share=False)
config.yaml ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # hydra/cli specific settings
2
+ hydra:
3
+ run:
4
+ # where to store run results
5
+ dir: outputs/${dataset_name}-${dataset_split}/${model.client_name}-${model.model_name}/prompt-${prompt_version}-${now:%y%m%d_%H%M%S}
6
+ output_subdir: null
7
+ job:
8
+ # change the working directory to the run directory
9
+ chdir: false
10
+ sweep:
11
+ dir: multirun
12
+ # change the working directory to the run directory
13
+ subdir: ${hydra.job.override_dirname}
14
+
15
+ defaults:
16
+ # can be hf or api
17
+ - model: ???
18
+ # for hydra 1.1 compatibility
19
+ - _self_
20
+
21
+ prompt_version: v3
22
+
23
+ dataset_name: semeval2022
24
+ dataset_split: validation
25
+
26
+ # if batch_mode is set to false, standard asynchronous inference will be used.
27
+ # if batch_mode is set to true, then:
28
+ # 1) if batch_job_ids are provided, their corresponding results will be fetched and concatenated as the prediction output.
29
+ # 2) if batch_job_ids are not provided, a batched inference job will be executed and new job ids will be produced.
30
+ batch_mode: false
31
+ batch_job_ids:
demo-neg.jpg ADDED

Git LFS Details

  • SHA256: 4337026c80412fd804bec02e14909107e4373a9a264627a9c40c737413916095
  • Pointer size: 131 Bytes
  • Size of remote file: 190 kB
demo-pos.jpg ADDED

Git LFS Details

  • SHA256: 712a7bf161070d9e1a87634fb9252e4c8f456f451b83fa24ea90803b0e969030
  • Pointer size: 131 Bytes
  • Size of remote file: 116 kB
demo_webui.py ADDED
@@ -0,0 +1,200 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import base64
2
+ from io import BytesIO
3
+ from pathlib import Path
4
+ from urllib.parse import urlparse
5
+
6
+ import dotenv
7
+ import gradio as gr
8
+ import requests
9
+ from clients import get_client_module
10
+ from omegaconf import DictConfig, OmegaConf
11
+ from PIL import Image
12
+ from prompts import get_prompt_module
13
+
14
+
15
+ dotenv.load_dotenv()
16
+
17
+ prompt_versions = [d.stem for d in Path("./prompts").iterdir() if d.is_file() and not d.name.startswith("_")]
18
+
19
+
20
class ConfigManager:
    """Loads and caches per-client model configurations from the YAML files."""

    def __init__(self):
        # Mapping of client name -> that client's OmegaConf settings.
        self.configs: dict = {}
        # Top-level keys that describe the run itself, not a client.
        self.ignore_keys = ["type", "client_name", "model_name"]

        # Populate the cache once on construction.
        self.update()

    def update(self):
        """Reload configs"""
        # Drop any cached state before re-reading from disk.
        self.configs.clear()

        # API-based clients: every remaining top-level key is its own client.
        api_cfg = OmegaConf.load("./model/api.yaml")
        self.configs.update({key: api_cfg[key] for key in api_cfg if key not in self.ignore_keys})

        # HF-based models are grouped under a single "huggingface" client.
        hf_cfg = OmegaConf.load("./model/hf.yaml")
        filtered = {key: hf_cfg[key] for key in hf_cfg if key not in self.ignore_keys}
        self.configs.update({"huggingface": DictConfig(filtered)})

    def clients(self):
        """Display all available clients"""
        return list(self.configs)

    def models(self, client=None):
        """Return the model names available for *client* (first client by default)."""
        if client is None:
            client = self.clients()[0]
        return list(self.configs[client].available_models)
50
+
51
+
52
+ config_manager = ConfigManager()
53
+
54
+
55
def link_client_and_model(client, model):  # noqa
    """Refresh the model dropdown to list the chosen client's models.

    ``model`` is unused but kept because Gradio wires it in as an input.
    """
    choices = config_manager.models(client)
    return gr.Dropdown(choices=choices, value=choices[0])
58
+
59
+
60
def display_prompt(prompt_version):
    """Return the human-readable description of the selected prompt version."""
    return get_prompt_module(prompt_version).description()
64
+
65
+
66
def encode_image(image):
    """Serialize a PIL image to a base64-encoded PNG string (no data-URI prefix)."""
    with BytesIO() as buffer:
        image.save(buffer, format="PNG")
        payload = buffer.getvalue()
    return base64.b64encode(payload).decode("utf-8")
70
+
71
+
72
def load_image(image_url_or_path, timeout=None):
    """Load a PIL image from an HTTP(S) URL, a local file path, or a base64 string.

    Args:
        image_url_or_path: A URL, a filesystem path, a raw base64 payload, or a
            ``data:image/...;base64,`` URI.
        timeout: Optional timeout in seconds for the HTTP request.

    Returns:
        The decoded ``PIL.Image.Image``.

    Raises:
        gr.Error: If the input cannot be interpreted as an image source.
    """
    result = urlparse(image_url_or_path)
    if result.scheme in ("http", "https") and result.netloc and result.path:
        response = requests.get(image_url_or_path, timeout=timeout)
        # Fail fast on HTTP errors instead of handing an error page's bytes to PIL.
        response.raise_for_status()
        image = Image.open(BytesIO(response.content))

    elif Path(image_url_or_path).is_file():
        image = Image.open(image_url_or_path)
    else:
        # Strip the "data:image/...;base64," prefix if present.
        if image_url_or_path.startswith("data:image/"):
            image_url_or_path = image_url_or_path.split(",")[1]

        # Try to load as base64
        try:
            base64_image = base64.decodebytes(image_url_or_path.encode())
            image = Image.open(BytesIO(base64_image))

        except Exception as e:
            # Chain the cause so the underlying decode failure is not lost.
            raise gr.Error(
                "Incorrect image source. Must be a valid URL starting with `http://` or `https://`, "
                "a valid path to an image file, or a base64 encoded string."
            ) from e
    return image
94
+
95
+
96
def llm_analyse(client, model, api_key, image, prompt):
    """Run the selected client/model on *image* with prompt version *prompt*.

    Args:
        client: Client name as listed by ``config_manager.clients()``.
        model: Model identifier for that client.
        api_key: API key; empty string means "use the client's default key".
        image: PIL image to analyse.
        prompt: Prompt version name resolvable by ``get_prompt_module``.

    Returns:
        The model's generated text.

    Raises:
        gr.Error: If prompt/client resolution or generation fails.
    """
    try:
        prompt_module = get_prompt_module(prompt)
        client_module = get_client_module(client)

        base64_image = f"data:image/png;base64,{encode_image(image)}"

        if api_key == "":
            api_key = None

        result = client_module.sync_generate(base64_image, prompt_module.messages_encoder, model, api_key=api_key)
        return result

    except Exception as e:
        # Raise (not return) so Gradio surfaces this as an error popup instead of
        # rendering the gr.Error object's repr into the output textbox.
        raise gr.Error(f"Error processing image: {e}") from e
111
+
112
+
113
# ------------------------------ WebUI layout ------------------------------ #
# NOTE(review): the row/column nesting below is reconstructed from a flattened
# source — confirm it matches the intended layout.
with gr.Blocks(
    theme=gr.themes.Default(primary_hue="orange"),
    css="""
    #app-container { max-width: 1400px; margin: auto; padding: 10px; }
    #title { text-align: center; margin-bottom: 10px; font-size: 24px; }
    #groq-badge { text-align: center; margin-top: 10px; }
    .gr-button { border-radius: 15px; }
    .gr-input, .gr-box { border-radius: 10px; }
    .gr-form { gap: 5px; }
    .gr-block.gr-box { padding: 10px; }
    .gr-paddle { height: auto; }
    """,
) as demo:
    gr.Markdown("# Image Moderation WebUI", elem_id="title")

    # --------------- Client and Model Selection Block --------------- #
    with gr.Row(equal_height=True):
        # Left column: prompt/client/model/key selection and image sources.
        with gr.Column(scale=3):
            prompt_version_input = gr.Dropdown(
                prompt_versions,
                value="-- Please Select --",
                allow_custom_value=True,
                label="Choose Prompt:",
            )

            client_input = gr.Dropdown(
                config_manager.clients(), label="Choose Client:", info="HuggingFace Requires a GPU"
            )

            model_input = gr.Dropdown(config_manager.models(), label="Choose Model:")

            api_input = gr.Textbox(
                type="password",
                label="API Key:",
                info="Leave this field blank to use the default key, or if you are using HuggingFace",
            )

            image_input = gr.Image(type="pil", label="Upload Image:", height=300, sources=["upload"])
            url_input = gr.Textbox(
                label="or Paste Image URL, Local File Path, or Base64 String:",
                info="Press Enter to load the image",
                lines=1,
            )

            with gr.Row():
                with gr.Column(scale=1, min_width=160):
                    pos_button = gr.Button("👍 Positive Demo")
                with gr.Column(scale=1, min_width=160):
                    neg_button = gr.Button("👎 Negative Demo")

        # Right column: prompt text preview and model output.
        with gr.Column(scale=5):
            prompt_text_input = gr.Textbox(label="or Paste Prompt Here:", lines=18)
            model_output = gr.Textbox(label="Model Output:", lines=18)

            with gr.Row():
                with gr.Column(scale=1, min_width=120):
                    analyze_button = gr.Button("🚀 Analyze Image", variant="primary")
                with gr.Column(scale=1, min_width=120):
                    clean_button = gr.Button("🧹 Clean Output", variant="primary")

    # Changing the client repopulates the model dropdown.
    client_input.change(fn=link_client_and_model, inputs=[client_input, model_input], outputs=model_input)

    # Selecting a prompt version shows its description in the prompt box.
    prompt_version_input.input(fn=display_prompt, inputs=prompt_version_input, outputs=prompt_text_input)

    # Clear the output textbox.
    clean_button.click(fn=lambda: gr.Textbox(value=""), inputs=None, outputs=model_output)

    # Pressing Enter in the URL box loads the image into the image widget.
    url_input.submit(fn=load_image, inputs=url_input, outputs=image_input)

    # Demo buttons load the bundled positive/negative example images.
    pos_button.click(
        fn=lambda: load_image(Path("./demo-pos.jpg").as_posix()),
        inputs=None,
        outputs=image_input,
    )
    neg_button.click(
        fn=lambda: load_image(Path("./demo-neg.jpg").as_posix()),
        inputs=None,
        outputs=image_input,
    )

    # ------------------------- Image Analysis Block ------------------------- #
    analyze_button.click(
        fn=llm_analyse,
        inputs=[client_input, model_input, api_input, image_input, prompt_version_input],
        outputs=model_output,
    )


demo.launch(share=False)
model/api.yaml ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ type: API
2
+
3
+ client_name: openai
4
+ model_name: ${model.${model.client_name}.model}
5
+
6
+ openai:
7
+ model: "gpt-4o-mini"
8
+ api_key: ${oc.env:OPENAI_API_KEY}
9
+ available_models:
10
+ # 0.000425, fixed
11
+ - gpt-4o-mini
12
+ # 0.000213, fixed
13
+ - gpt-4o
14
+ # 0.000098, max
15
+ - gpt-4.1-nano
16
+ # 0.000259, max
17
+ - gpt-4.1-mini
18
+ # 0.00017, fixed
19
+ - gpt-4.1
20
+
21
+ together:
22
+ model: "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo"
23
+ api_key: ${oc.env:TOGETHER_API_KEY}
24
+ available_models:
25
+ - "meta-llama/Llama-Vision-Free"
26
+ - "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo"
27
+ - "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo"
28
+
29
+ # moderation models
30
+ - "meta-llama/Llama-Guard-3-11B-Vision-Turbo"
31
+
32
+
33
+
34
+ groq:
35
+ client:
36
+ _target_: groq.Groq
37
+
38
+ async_client:
39
+ _target_: groq.AsyncGroq
40
+
41
+ api_key: ${oc.env:GROQ_API_KEY}
42
+
43
+ model: "meta-llama/llama-4-scout-17b-16e-instruct"
44
+ available_models:
45
+ - "meta-llama/llama-4-scout-17b-16e-instruct"
model/hf.yaml ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ type: HF
2
+
3
+ client_name: huggingface
4
+
5
+ model_name: unsloth/Llama-3.2-11B-Vision-Instruct-bnb-4bit
6
+
7
+ available_models:
8
+ - unsloth/Llama-3.2-11B-Vision-Instruct-bnb-4bit
9
+
10
+ model_loader:
11
+ _target_: transformers.AutoModelForImageTextToText.from_pretrained
12
+ pretrained_model_name_or_path: ${model.model_name}
13
+ device_map: auto
14
+ trust_remote_code: true
15
+
16
+ processor_loader:
17
+ _target_: transformers.AutoProcessor.from_pretrained
18
+ pretrained_model_name_or_path: ${model.model_name}
19
+ trust_remote_code: true
20
+
21
+
22
+
23
+
24
+
25
+
pyproject.toml ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [tool.ruff]
2
+ line-length = 119
3
+
4
+ [tool.ruff.lint]
5
+ select = [
6
+ "C", # flake8-comprehensions
7
+ "E", # pycodestyle-error
8
+ "W", # pycodestyle-warning
9
+ "F", # Pyflakes
10
+ "I", # isort
11
+ ]
12
+ ignore = [
13
+ "C901", # complex-structure
14
+ "E402", # module-import-not-at-top-of-file
15
+ "E501", # line-too-long
16
+ "E741", # ambiguous-variable-name
17
+ ]
18
+
19
+
20
+ # Ignore import violations in all `__init__.py` files.
21
+ [tool.ruff.lint.per-file-ignores]
22
+ "__init__.py" = [
23
+ "F403", # undefined-local-with-import-star
24
+ ]
25
+
26
+ [tool.ruff.lint.isort]
27
+ lines-after-imports = 2
28
+
29
+ [tool.ruff.format]
30
+ # Like Black, use double quotes for strings.
31
+ quote-style = "double"
32
+
33
+ # Like Black, indent with spaces, rather than tabs.
34
+ indent-style = "space"
35
+
36
+ # Like Black, respect magic trailing commas.
37
+ skip-magic-trailing-comma = false
38
+
39
+ # Like Black, automatically detect the appropriate line ending.
40
+ line-ending = "auto"