nuriyev commited on
Commit
adfaee6
·
1 Parent(s): 233a360

Improve app.py and sync it with the Hugging Face Spaces deployment.

Browse files
Files changed (5) hide show
  1. .github/workflows/sync_hf.yml +18 -0
  2. .gitignore +8 -0
  3. app.py +373 -0
  4. helpers/utils.py +5 -1
  5. requirements.txt +6 -0
.github/workflows/sync_hf.yml ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Mirror this repository to the Hugging Face Space on every push to main
# (or manually via workflow_dispatch).
name: Sync to Hugging Face hub
on:
  push:
    branches: [main]
  workflow_dispatch:

jobs:
  sync-to-hub:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          # Full history is required so the force-push below mirrors every commit.
          fetch-depth: 0
          # Pull Git LFS objects too (model/data files may be LFS-tracked).
          lfs: true
      - name: Push to hub
        env:
          # HF_TOKEN must be a write-scoped Hugging Face access token stored
          # in the repository secrets.
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
        run: git push -f https://nuriyev:$HF_TOKEN@huggingface.co/spaces/nuriyev/text2mcdm main
.gitignore ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ .*
2
+ !.gitignore
3
+ !.github/
4
+ trash/
5
+ __pycache__/
6
+ outputs/
7
+ unsloth_compiled_cache/
8
+ lora_model/
app.py ADDED
@@ -0,0 +1,373 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import spaces
3
+ import torch
4
+ from transformers import AutoModelForCausalLM, AutoTokenizer
5
+ from znum import Znum, Topsis, Promethee, Beast
6
+ import re
7
+ from helpers.utils import DEFAULT_QUERY, DEFAULT_QUERY2, DEFAULT_QUERY3
8
+
9
# Z-number mappings: linguistic ratings on a 1-5 scale are translated into
# fuzzy trapezoidal numbers [a1, a2, a3, a4] that the znum package consumes.

# A-part (value/magnitude).  Key 1 = lowest rating, 5 = highest.
A_MAP = {
    1: [2, 3, 3, 4],
    2: [4, 5, 5, 6],
    3: [6, 7, 7, 8],
    4: [8, 9, 9, 10],
    5: [10, 11, 11, 12],
}

# B-part (confidence/reliability) on [0, 1].  Key 1 = very uncertain,
# 5 = very confident.
B_MAP = {
    1: [0.2, 0.3, 0.3, 0.4],
    2: [0.3, 0.4, 0.4, 0.5],
    3: [0.4, 0.5, 0.5, 0.6],
    4: [0.5, 0.6, 0.6, 0.7],
    5: [0.6, 0.7, 0.7, 0.8],
}

# Instruction prompt sent as the system message; it fixes the exact Markdown
# table shape that parse_markdown_table() later expects.
SYSTEM_PROMPT = """\
Extract a Z-number decision matrix from the following user input.

## Z-Number Scales:
- Value (A-part):
  - benefit: 5 (excellent) → 4 (good) → 3 (moderate) → 2 (poor) → 1 (very poor)
  - neutral: 0
  - cost: -1 (very low cost) → -2 (low) → -3 (moderate) → -4 (high) → -5 (very high cost)
- Confidence (B-part): 5 (very confident) → 4 (confident) → 3 (somewhat confident) → 2 (uncertain) → 1 (very uncertain)

## Output Format:
Return ONLY a Markdown table in this exact format:

| | criterion_1 | criterion_2 | ... |
|---|---|---|---|
| type | benefit | cost | ... |
| alt_1 | 4:3 | -3:4 | ... |
| alt_2 | 3:4 | -2:5 | ... |
| ... | ... | ... | ... |
| weight | 3:2 | 4:3 | ... |

## Rules:
1. First row: empty cell, then criterion names (alphanumeric + underscores only)
2. Second row: "type", then either "benefit" or "cost" for each criterion
3. Middle rows: alternative names, then VALUE:CONFIDENCE pairs
4. Last row: "weight", then importance weights as VALUE:CONFIDENCE (always use positive values 1-5 for weights)
5. VALUE must be positive (1-5) for benefits, negative (-1 to -5) for costs
6. CONFIDENCE is always positive (1-5) regardless of criterion type
"""

# Global model and tokenizer, loaded lazily (once) by load_model().
model = None
tokenizer = None
59
+
60
+
61
def load_model():
    """Lazily initialise and return the global (model, tokenizer) pair.

    The first call downloads/loads the fine-tuned checkpoint; subsequent
    calls return the cached globals unchanged.
    """
    global model, tokenizer
    # Already initialised — reuse the cached pair.
    if model is not None:
        return model, tokenizer

    model_name = "nuriyev/Qwen3-4B-znum-decision-matrix"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        device_map="auto",
        torch_dtype=torch.bfloat16,
    )
    return model, tokenizer
73
+
74
+
75
def parse_znum_pair(pair_str: str) -> Znum | None:
    """Convert a 'VALUE:CONFIDENCE' token into a Znum via A_MAP/B_MAP.

    The value part is taken as an absolute magnitude, so cost ratings such
    as '-3' use the same scale as benefits.  Returns None for anything that
    does not parse to two in-scale integers.
    """
    try:
        left, sep, right = pair_str.strip().partition(':')
        # No colon, or extra colons, means the token is malformed.
        if not sep or ':' in right:
            return None
        value = abs(int(left))
        confidence = int(right)
        if value in A_MAP and confidence in B_MAP:
            return Znum(A_MAP[value], B_MAP[confidence])
        return None
    except (ValueError, KeyError):
        # Non-integer parts (or an unexpected lookup failure) → unparsable.
        return None
88
+
89
+
90
def parse_markdown_table(text: str) -> dict:
    """Parse the model's Markdown decision-matrix table into a dict.

    Expected layout (enforced by SYSTEM_PROMPT): a header row of criterion
    names, a "type" row (benefit/cost), one row per alternative, and a
    final "weight" row.

    Returns a dict with keys 'criteria', 'types', 'alternatives' (name ->
    list of VALUE:CONFIDENCE strings) and 'weights', or {} when the text
    does not contain a recognizable table.
    """
    lines = [l.strip() for l in text.strip().split('\n') if l.strip() and '|' in l]
    # Drop Markdown separator rows such as |---|---|.
    lines = [l for l in lines if not re.match(r'^\|[-:\s|]+\|$', l)]

    # Need at least header + type + one alternative + weight.
    if len(lines) < 4:
        return {}

    def split_row(row: str) -> list:
        # Split on pipes and drop empty cells (including the leading/trailing
        # empties produced by the outer pipes).
        cells = [c.strip() for c in row.split('|')]
        return [c for c in cells if c]

    headers = split_row(lines[0])
    criteria = headers  # header's empty first cell is filtered out above

    types_row = split_row(lines[1])
    weights_row = split_row(lines[-1])

    # BUGFIX: previously the second/last rows were used unconditionally, so a
    # table missing its "weight" row was silently mis-parsed (an alternative
    # became the weights and the rankings were garbage).  Validate the row
    # labels and reject malformed tables instead — callers already handle {}.
    if not types_row or types_row[0].lower() != 'type':
        return {}
    if not weights_row or weights_row[0].lower() != 'weight':
        return {}

    types = types_row[1:]
    weights = weights_row[1:]

    alternatives = {}
    for line in lines[2:-1]:
        row = split_row(line)
        if row:
            alternatives[row[0]] = row[1:]

    return {
        'criteria': criteria,
        'types': types,
        'alternatives': alternatives,
        'weights': weights
    }
125
+
126
+
127
+ def format_table_html(matrix: dict) -> str:
128
+ """Convert parsed matrix to a nicely formatted HTML table."""
129
+ if not matrix or not matrix.get('criteria'):
130
+ return "<p style='color:#666;'>No decision matrix generated yet.</p>"
131
+
132
+ html = """
133
+ <div style="overflow-x:auto;">
134
+ <table style="border-collapse:collapse;width:100%;font-family:system-ui,sans-serif;font-size:13px;background:#fff;">
135
+ <thead>
136
+ <tr>
137
+ <th style="border:1px solid #ddd;padding:10px 12px;text-align:left;background:#111;color:#fff;font-weight:500;">Alternative</th>
138
+ """
139
+
140
+ th_style = "border:1px solid #ddd;padding:10px 12px;text-align:center;background:#111;color:#fff;font-weight:500;"
141
+ for crit in matrix['criteria']:
142
+ html += f'<th style="{th_style}">{crit.replace("_", " ").title()}</th>'
143
+ html += "</tr></thead><tbody>"
144
+
145
+ td_style = "border:1px solid #ddd;padding:10px 12px;text-align:center;color:#333;"
146
+ td_left = "border:1px solid #ddd;padding:10px 12px;text-align:left;color:#333;font-weight:500;"
147
+
148
+ # Type row
149
+ html += f'<tr style="background:#f9f9f9;"><td style="{td_left}">Type</td>'
150
+ for t in matrix['types']:
151
+ color = "#111" if t.lower() == "benefit" else "#666"
152
+ html += f'<td style="{td_style}color:{color};">{t.capitalize()}</td>'
153
+ html += "</tr>"
154
+
155
+ # Alternative rows
156
+ for i, (alt_name, values) in enumerate(matrix['alternatives'].items()):
157
+ bg = "#fff" if i % 2 == 0 else "#f9f9f9"
158
+ html += f'<tr style="background:{bg};"><td style="{td_left}">{alt_name.replace("_", " ").title()}</td>'
159
+ for v in values:
160
+ html += f'<td style="{td_style}font-family:monospace;">{v}</td>'
161
+ html += "</tr>"
162
+
163
+ # Weight row
164
+ html += f'<tr style="background:#111;"><td style="border:1px solid #333;padding:10px 12px;text-align:left;color:#fff;font-weight:500;">Weight</td>'
165
+ for w in matrix['weights']:
166
+ html += f'<td style="border:1px solid #333;padding:10px 12px;text-align:center;color:#fff;font-family:monospace;">{w}</td>'
167
+ html += "</tr>"
168
+
169
+ html += "</tbody></table></div>"
170
+ return html
171
+
172
+
173
+ def format_results_html(alt_names: list, solver, method: str) -> str:
174
+ """Format MCDM results as HTML."""
175
+ html = f"""
176
+ <div style="font-family:system-ui,sans-serif;padding:20px;background:#111;border-radius:6px;color:#fff;">
177
+ <div style="font-size:11px;font-weight:500;margin-bottom:16px;text-transform:uppercase;letter-spacing:1px;color:#888;">{method.upper()} Ranking</div>
178
+ """
179
+
180
+ for rank, idx in enumerate(solver.ordered_indices, 1):
181
+ # Different circle colors for ranks
182
+ if rank == 1:
183
+ circle_bg, circle_color = "#fff", "#000"
184
+ elif rank == 2:
185
+ circle_bg, circle_color = "#666", "#fff"
186
+ else:
187
+ circle_bg, circle_color = "#444", "#fff"
188
+
189
+ badge = '<span style="background:#fff;color:#000;padding:3px 10px;border-radius:3px;font-size:10px;font-weight:600;text-transform:uppercase;">BEST</span>' if rank == 1 else ""
190
+
191
+ border = "border-bottom:1px solid #333;" if rank < len(solver.ordered_indices) else ""
192
+ html += f"""
193
+ <div style="display:flex;align-items:center;padding:12px 0;{border}">
194
+ <span style="width:26px;height:26px;background:{circle_bg};color:{circle_color};border-radius:50%;display:inline-flex;align-items:center;justify-content:center;font-weight:600;font-size:12px;margin-right:14px;">{rank}</span>
195
+ <span style="flex-grow:1;font-size:14px;">{alt_names[idx].replace('_', ' ').title()}</span>
196
+ {badge}
197
+ </div>
198
+ """
199
+
200
+ html += "</div>"
201
+ return html
202
+
203
+
204
@spaces.GPU
def process_decision(query: str, method: str, progress=gr.Progress()):
    """Run the full pipeline: LLM extraction -> parsing -> MCDM ranking.

    Args:
        query: free-text decision narrative from the user.
        method: "TOPSIS" or "PROMETHEE" (anything else falls through to
            Promethee — see the dispatch below).
        progress: Gradio progress tracker (injected by Gradio).

    Returns:
        A 3-tuple (table_html, results_html, raw_generated_text) matching
        the three Gradio output components.
    """
    # Empty input: short-circuit with placeholder messages.
    if not query.strip():
        return "<p>Please enter a decision query.</p>", "<p>No results yet.</p>", ""

    progress(0.1, desc="Loading model...")
    model, tokenizer = load_model()

    progress(0.2, desc="Preparing input...")
    messages = [
        {"role": "system", "content": SYSTEM_PROMPT},
        {"role": "user", "content": query},
    ]

    # enable_thinking=False suppresses the Qwen3 "thinking" preamble so the
    # output is just the table.
    prompt = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
        enable_thinking=False
    )
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

    progress(0.3, desc="Generating decision matrix...")
    with torch.no_grad():
        output_ids = model.generate(
            **inputs,
            max_new_tokens=2048,
            temperature=0.7,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id
        )

    # Slice off the prompt tokens so only the newly generated text is decoded.
    generated_ids = output_ids[0][inputs['input_ids'].shape[1]:]
    generated_text = tokenizer.decode(generated_ids, skip_special_tokens=True)

    progress(0.6, desc="Parsing decision matrix...")
    matrix = parse_markdown_table(generated_text)

    # Unparsable output: surface the raw text so the user can see what failed.
    if not matrix or not matrix.get('criteria'):
        return (
            "<p style='color: red;'>Failed to generate a valid decision matrix. Please try again with a clearer query.</p>",
            "<p>No results available.</p>",
            generated_text
        )

    # Format table HTML
    table_html = format_table_html(matrix)

    progress(0.8, desc=f"Applying {method.upper()}...")

    # Convert the VALUE:CONFIDENCE strings to Znum objects.
    znum_weights = [parse_znum_pair(w) for w in matrix['weights']]
    znum_alternatives = {}
    for alt_name, values in matrix['alternatives'].items():
        znum_alternatives[alt_name] = [parse_znum_pair(v) for v in values]

    # Any None means a cell failed to parse; show the table but skip ranking.
    if None in znum_weights or any(None in vals for vals in znum_alternatives.values()):
        return (
            table_html,
            "<p style='color: orange;'>Warning: Some Z-numbers could not be parsed. Results may be incomplete.</p>",
            generated_text
        )

    # Map the benefit/cost strings onto the znum package's enum.
    criteria_types = [
        Beast.CriteriaType.BENEFIT if t.lower() == 'benefit' else Beast.CriteriaType.COST
        for t in matrix['types']
    ]

    # Build decision table: weights row first, alternative rows, then the
    # criteria-type row.  NOTE(review): this layout is presumably what the
    # znum Topsis/Promethee constructors expect — confirm against the znum
    # package documentation.
    alt_names = list(znum_alternatives.keys())
    alt_rows = [znum_alternatives[name] for name in alt_names]
    table = [znum_weights] + alt_rows + [criteria_types]

    # Dispatch to the chosen MCDM solver.
    if method == "TOPSIS":
        solver = Topsis(table)
    else:
        solver = Promethee(table)

    solver.solve()

    progress(1.0, desc="Done!")

    results_html = format_results_html(alt_names, solver, method)

    return table_html, results_html, generated_text
293
+
294
+
295
# Build the Gradio interface at module level so `demo` exists for Spaces
# to serve (and for `demo.launch()` when run as a script).
with gr.Blocks(
    title="Text2MCDM",
    theme=gr.themes.Default(
        primary_hue="neutral",
        neutral_hue="slate",
    ),
    css="""
.gradio-container { max-width: 960px !important; }
.header { text-align: center; padding: 24px 0; margin-bottom: 16px; }
.header h1 { font-size: 1.75rem; font-weight: 600; color: #111; margin: 0 0 4px 0; }
.header p { color: #666; font-size: 0.9rem; margin: 0; }
"""
) as demo:
    # Page header.
    gr.HTML('''
<div class="header">
<h1>Text2MCDM</h1>
<p>Transform decision narratives into structured Z-number analysis</p>
</div>
''')

    # Free-text decision narrative input, pre-filled with an example.
    query_input = gr.Textbox(
        label="Decision Narrative",
        placeholder="Describe your decision: What are your options? What factors matter? How confident are you about each?",
        lines=5,
        value=DEFAULT_QUERY
    )

    # Method selector + submit button on one row.
    with gr.Row():
        method_dropdown = gr.Dropdown(
            choices=["TOPSIS", "PROMETHEE"],
            value="TOPSIS",
            label="Method",
            scale=1
        )
        submit_btn = gr.Button("Analyze", variant="primary", scale=2)

    gr.Markdown("---")

    # Two-column result area: decision matrix left, ranking right.
    with gr.Row():
        with gr.Column():
            gr.Markdown("**Decision Matrix**")
            table_output = gr.HTML(value="<p style='color:#888;'>Results will appear here.</p>")

        with gr.Column():
            gr.Markdown("**Ranking**")
            results_output = gr.HTML(value="<p style='color:#888;'>Results will appear here.</p>")

    # Raw LLM output for debugging failed parses.
    with gr.Accordion("Raw Model Output", open=False):
        raw_output = gr.Textbox(label="Generated Text", lines=6, interactive=False)

    with gr.Accordion("How it works", open=False):
        gr.Markdown("""
1. Describe your decision problem in natural language
2. The LLM extracts alternatives, criteria, and ratings
3. Z-numbers capture both **value** and **confidence** (format: `value:confidence`)
4. MCDM algorithm (TOPSIS or PROMETHEE) ranks your options

**Scale:** Values 1-5 for benefits, -1 to -5 for costs. Confidence always 1-5.
""")

    # Clickable example queries.
    gr.Examples(
        examples=[
            [DEFAULT_QUERY, "TOPSIS"],
            [DEFAULT_QUERY2, "TOPSIS"],
            [DEFAULT_QUERY3, "PROMETHEE"],
        ],
        inputs=[query_input, method_dropdown],
        label="Examples"
    )

    # Wire the button to the inference pipeline.
    submit_btn.click(
        fn=process_decision,
        inputs=[query_input, method_dropdown],
        outputs=[table_output, results_output, raw_output]
    )

if __name__ == "__main__":
    demo.launch()
helpers/utils.py CHANGED
@@ -30,4 +30,8 @@ Return ONLY a Markdown table in this exact format:
30
 
31
 
32
 
33
- DEFAULT_QUERY = "Okay so I've been going crazy trying to figure out if I should get a dog, a cat, or maybe just stick with a fish tank, and honestly I'm not even sure what matters most to me anymore. Like, companionship is huge for me, probably the most important thing, and I'm pretty confident dogs are amazing for that, cats are okay but kind of aloof sometimes, fish are basically just decoration let's be real. Then there's the whole time commitment thing which I think matters a lot but I'm not totally sure how much—I work weird hours so dogs seem like a nightmare there, cats are supposedly independent but my friend's cat is super needy so who knows, fish are definitely easy but maybe too easy? Cost is something I should care about more than I do, I guess it's moderately important, and I've heard dogs are expensive with vet bills and food, cats are cheaper I think, fish setups can actually cost a lot upfront but then nothing after. Oh and allergies—my roommate might be slightly allergic to cats, we're not 100% sure, dogs seem fine, fish obviously no issue there, so that's pretty important actually. And like, the whole lifestyle flexibility thing where I want to travel sometimes, dogs are terrible for that obviously, cats you can leave for a weekend maybe, fish just need an auto-feeder so that's nice, but I'm not sure how much I'll actually travel so maybe this doesn't matter that much?"
 
 
 
 
 
30
 
31
 
32
 
33
# Example decision narratives used by app.py as the default textbox value
# and as gr.Examples entries.  These are runtime strings — do not edit
# casually; the demo UI displays them verbatim.
DEFAULT_QUERY = "Okay so I've been going crazy trying to figure out if I should get a dog, a cat, or maybe just stick with a fish tank, and honestly I'm not even sure what matters most to me anymore. Like, companionship is huge for me, probably the most important thing, and I'm pretty confident dogs are amazing for that, cats are okay but kind of aloof sometimes, fish are basically just decoration let's be real. Then there's the whole time commitment thing which I think matters a lot but I'm not totally sure how much—I work weird hours so dogs seem like a nightmare there, cats are supposedly independent but my friend's cat is super needy so who knows, fish are definitely easy but maybe too easy? Cost is something I should care about more than I do, I guess it's moderately important, and I've heard dogs are expensive with vet bills and food, cats are cheaper I think, fish setups can actually cost a lot upfront but then nothing after. Oh and allergies—my roommate might be slightly allergic to cats, we're not 100% sure, dogs seem fine, fish obviously no issue there, so that's pretty important actually. And like, the whole lifestyle flexibility thing where I want to travel sometimes, dogs are terrible for that obviously, cats you can leave for a weekend maybe, fish just need an auto-feeder so that's nice, but I'm not sure how much I'll actually travel so maybe this doesn't matter that much?"

DEFAULT_QUERY2 = "I'm trying to decide between taking a train, flying, or driving for my upcoming trip from Paris to Amsterdam, and I need to figure out what actually matters here. Environmental impact is absolutely my top priority, like I cannot stress this enough, I've been trying to reduce my carbon footprint all year and this is non-negotiable for me. The train is obviously amazing for this, planes are a disaster environmentally, and driving alone isn't great either but better than flying I guess. Cost matters somewhat but honestly I have some savings and this is a special trip so I'm not going to stress about it too much—trains are reasonable, flights can be cheap with budget airlines, driving with gas and tolls probably adds up. Comfort is kind of important to me since I get motion sick and anxious in certain situations, and trains are super relaxing with legroom and wifi, planes make me nervous and cramped, cars are fine but I'd be exhausted from driving. Time efficiency barely matters since I'm not in a rush at all and I'm taking the whole week off anyway—I know planes are fastest at like an hour, trains take about 3 hours, driving is maybe 5 hours, but whatever, I have time. Oh and reliability is pretty important because I absolutely cannot miss my friend's wedding ceremony on Saturday, and trains are almost always on time, flights get delayed constantly especially budget ones, and driving you could hit traffic or car trouble."

DEFAULT_QUERY3 = "Okay I desperately need help choosing a venue for my company's annual client dinner and my options have somehow narrowed down to The Wellington House, my coworker Steve's garage, or the food court at the mall. Professionalism is absolutely critical here, like my CEO will be there and we're trying to impress clients who could sign a million dollar contract—The Wellington House is this gorgeous upscale restaurant with private dining rooms and waitstaff in suits, Steve's garage has oil stains on the floor and he swears he'll clean out the lawnmower but I don't trust him, the food court has a Sbarro and teenagers doing TikTok dances. Food quality is equally essential because these clients flew in from Tokyo specifically for this, and Wellington House has a Michelin-starred chef, Steve said he'd order Domino's which honestly isn't even his worst idea, the food court is self-explanatory. Parking and accessibility is very important since several attendees are elderly executives, Wellington House has complimentary valet service, Steve lives down a dirt road with no street lights and his driveway fits maybe three cars, the mall actually has decent parking I'll admit but you'd have to walk past a guy selling phone cases from a kiosk. Audio-visual capabilities matter a lot because we have a presentation to give, Wellington House has a built-in projector and screen with tech support, Steve has a TV but it's been displaying only green since 2019, food court is a no. Budget is the only place where I have any flexibility and even then it's not huge—Wellington House is expensive but we have the budget approved, Steve offered to host for free but the reputation damage would cost us millions, food court would be cheap but again we'd lose the contract and possibly our jobs."
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ gradio>=4.44.0
2
+ transformers>=4.45.0
3
+ torch>=2.0.0
4
+ accelerate>=0.25.0
5
+ znum>=0.5.0
6
+ spaces