buley commited on
Commit
b4d8480
·
verified ·
1 Parent(s): 2ba37ce

Upload folder using huggingface_hub

Browse files
Files changed (5) hide show
  1. Dockerfile +17 -0
  2. README.md +3 -9
  3. aeon.css +148 -0
  4. app.py +57 -192
  5. requirements.txt +4 -2
Dockerfile ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Dockerfile for "The Void" Gradio Space (README sdk: docker, app_port 7860).
FROM python:3.11-slim

# build-essential + cmake: needed to compile llama-cpp-python's native extension.
RUN apt-get update && apt-get install -y build-essential cmake && rm -rf /var/lib/apt/lists/*
RUN pip install --no-cache-dir \
    "gradio==4.44.1" \
    "huggingface-hub==0.26.5" \
    "llama-cpp-python>=0.3.0"

# Non-root user with uid 1000 (Hugging Face Spaces convention).
RUN useradd -m -u 1000 user
# Pre-create the model cache dir so the unprivileged user can write to it.
RUN mkdir -p /tmp/hf_cache && chown -R user:user /tmp/hf_cache
USER user
WORKDIR /app
COPY app.py aeon.css ./
ENV PYTHONUNBUFFERED=1
# Point huggingface_hub downloads at the user-writable cache created above.
ENV HF_HOME=/tmp/hf_cache
EXPOSE 7860
CMD ["python", "app.py"]
README.md CHANGED
@@ -1,15 +1,9 @@
1
  ---
2
  title: The Void - Buleyean RL Demo
3
- emoji: "\U0001F573\uFE0F"
4
  colorFrom: gray
5
  colorTo: indigo
6
- sdk: gradio
7
- sdk_version: 5.23.0
8
- python_version: "3.11"
9
- app_file: app.py
10
  pinned: true
11
- models:
12
- - bartowski/SmolLM2-360M-Instruct-GGUF
13
- - forkjoin-ai/buleyean-smollm2-360m
14
- - HuggingFaceTB/SmolLM2-360M-Instruct
15
  ---
 
1
  ---
2
  title: The Void - Buleyean RL Demo
3
+ emoji: 🕳️
4
  colorFrom: gray
5
  colorTo: indigo
6
+ sdk: docker
7
+ app_port: 7860
 
 
8
  pinned: true
 
 
 
 
9
  ---
aeon.css ADDED
@@ -0,0 +1,148 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
/* AeonOS Design System for Gradio Spaces */
/* Palette (zinc/blue dark theme): surfaces #09090b / #0c0c0f / #111114,
   borders #1f1f23, primary text #fafafa, secondary #a1a1aa / #71717a,
   accent blue #3b82f6 (hover #60a5fa). `!important` is used throughout to
   override Gradio's built-in theme styles. */

.gradio-container {
    background: #09090b !important;
    color: #fafafa !important;
    font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif !important;
}

/* Surface panels */
.block, .panel, .form, .wrap, .contain {
    background: #0c0c0f !important;
    border-color: #1f1f23 !important;
    border-radius: 6px !important;
}

/* Elevated panels */
.gr-panel, .gr-box, .gr-form {
    background: #111114 !important;
    border: 1px solid #1f1f23 !important;
}

/* Text */
.prose, .markdown, label, .label-wrap {
    color: #fafafa !important;
}

.prose h1, .prose h2, .prose h3, .markdown h1, .markdown h2, .markdown h3 {
    color: #fafafa !important;
}

.prose p, .markdown p {
    color: #a1a1aa !important;
}

/* Secondary text */
.secondary-text, .caption, .info {
    color: #71717a !important;
}

/* Links */
a, .prose a, .markdown a {
    color: #3b82f6 !important;
    text-decoration: none !important;
}
a:hover {
    color: #60a5fa !important;
}

/* Inputs */
input, textarea, select, .gr-input, .gr-text-input {
    background: #09090b !important;
    border: 1px solid #1f1f23 !important;
    color: #fafafa !important;
    border-radius: 6px !important;
    font-size: 16px !important; /* >=16px prevents iOS Safari zoom-on-focus */
}
input:focus, textarea:focus {
    border-color: #3b82f6 !important;
    outline: none !important;
    box-shadow: 0 0 0 2px rgba(59, 130, 246, 0.15) !important;
}

/* Primary button */
.primary, button.primary, .gr-button-primary {
    background: #3b82f6 !important;
    color: #fafafa !important;
    border: none !important;
    border-radius: 6px !important;
    font-weight: 500 !important;
    transition: background 150ms ease !important;
}
.primary:hover, button.primary:hover {
    background: #60a5fa !important;
}

/* Secondary buttons */
button, .gr-button {
    background: #18181b !important;
    color: #a1a1aa !important;
    border: 1px solid #1f1f23 !important;
    border-radius: 6px !important;
}
button:hover, .gr-button:hover {
    background: #1e1e22 !important;
    border-color: #27272a !important;
}

/* Tables */
table {
    border-collapse: collapse !important;
}
th {
    background: #111114 !important;
    color: #a1a1aa !important;
    border-bottom: 1px solid #1f1f23 !important;
    padding: 8px 12px !important;
    text-align: left !important;
}
td {
    border-bottom: 1px solid #17171a !important;
    padding: 8px 12px !important;
    color: #fafafa !important;
}

/* Code blocks */
code, pre {
    background: #111114 !important;
    color: #93c5fd !important;
    border-radius: 4px !important;
    border: 1px solid #1f1f23 !important;
}

/* Accent glow for key metrics */
.metric-highlight {
    color: #3b82f6 !important;
    font-weight: 600 !important;
}

/* Status colors */
.success { color: #22c55e !important; }
.warning { color: #eab308 !important; }
.error { color: #ef4444 !important; }

/* Dataset/Examples component */
.dataset-row, .sample-row {
    background: #111114 !important;
    border: 1px solid #1f1f23 !important;
}
.dataset-row:hover, .sample-row:hover {
    background: #18181b !important;
}

/* Scrollbar */
::-webkit-scrollbar {
    width: 6px;
}
::-webkit-scrollbar-track {
    background: #09090b;
}
::-webkit-scrollbar-thumb {
    background: #27272a;
    border-radius: 3px;
}

/* Footer */
footer {
    color: #52525b !important;
}
app.py CHANGED
@@ -4,219 +4,84 @@ LIVE inference only. Every response generated in real-time.
4
  """
5
 
6
  import gradio as gr
7
- from transformers import AutoModelForCausalLM, AutoTokenizer
8
- from peft import PeftModel
9
- import torch
10
-
11
- BASE_MODEL_ID = "HuggingFaceTB/SmolLM2-360M-Instruct"
12
- BULEYEAN_ADAPTER = "forkjoin-ai/buleyean-smollm2-360m"
13
-
14
- print("Loading tokenizer...", flush=True)
15
- tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL_ID)
16
- if tokenizer.pad_token is None:
17
- tokenizer.pad_token = tokenizer.eos_token
18
-
19
- print(f"Loading base model ({BASE_MODEL_ID})...", flush=True)
20
- base_model = AutoModelForCausalLM.from_pretrained(
21
- BASE_MODEL_ID,
22
- torch_dtype=torch.float32,
23
- device_map="cpu",
24
- trust_remote_code=True,
25
  )
26
- print("Base model loaded.", flush=True)
27
-
28
- print("Loading Buleyean adapter...", flush=True)
29
- bule_base = AutoModelForCausalLM.from_pretrained(
30
- BASE_MODEL_ID,
31
- torch_dtype=torch.float32,
32
- device_map="cpu",
33
- trust_remote_code=True,
34
  )
35
- try:
36
- bule_model = PeftModel.from_pretrained(bule_base, BULEYEAN_ADAPTER)
37
- bule_model = bule_model.merge_and_unload()
38
- print("Buleyean adapter merged.", flush=True)
39
- except Exception as e:
40
- print(f"Warning: Could not load adapter ({e}), using base model copy", flush=True)
41
- bule_model = bule_base
42
 
43
- print("All models ready. Live inference active.", flush=True)
 
 
 
44
 
45
 
46
- def generate(prompt, model, max_tokens=300, temperature=0.7, top_p=0.9):
47
- messages = [{"role": "user", "content": prompt}]
48
- input_text = tokenizer.apply_chat_template(
49
- messages, tokenize=False, add_generation_prompt=True
50
- )
51
- inputs = tokenizer(input_text, return_tensors="pt")
52
- with torch.no_grad():
53
- outputs = model.generate(
54
- **inputs,
55
- max_new_tokens=max_tokens,
56
- temperature=temperature,
57
- top_p=top_p,
58
- do_sample=True,
59
- pad_token_id=tokenizer.pad_token_id,
60
- )
61
- response = tokenizer.decode(
62
- outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True
63
  )
64
- return response.strip()
65
 
66
 
67
  def compare(prompt):
68
  if not prompt or not prompt.strip():
69
  return "Please enter a prompt.", "Please enter a prompt."
70
- base_out = generate(prompt, base_model)
71
- bule_out = generate(prompt, bule_model)
72
  return base_out, bule_out
73
 
74
 
75
- # ---------------------------------------------------------------------------
76
- # UI
77
- # ---------------------------------------------------------------------------
78
-
79
- with gr.Blocks(
80
- title="The Void -- Buleyean RL",
81
- theme=gr.themes.Base(primary_hue="indigo"),
82
- ) as demo:
83
-
84
  gr.Markdown("""# The Void -- Buleyean RL
85
 
86
  **Live inference. Every response generated in real-time. Nothing hardcoded. Nothing fabricated.**
87
 
88
- Standard RLHF learns what to say by imitating chosen completions.
89
- Buleyean RL learns what *not* to say by studying rejections.
90
- The complement distribution preserves the (K-1) rejected perspectives.
91
-
92
- [Library](https://github.com/forkjoin-ai/buleyean-rl) | [Paper](https://forkracefold.com) | [Training Data](https://huggingface.co/datasets/forkjoin-ai/buleyean-rejection-data) | 500+ Lean 4 theorems, zero sorry
93
- """)
94
 
95
- with gr.Tab("Compare (Base vs Buleyean)"):
96
- gr.Markdown("""Type any prompt. Both models run inference right now on this machine.
97
 
98
- **Base:** [SmolLM2-360M-Instruct](https://huggingface.co/HuggingFaceTB/SmolLM2-360M-Instruct) |
99
- **Buleyean:** [buleyean-smollm2-360m](https://huggingface.co/forkjoin-ai/buleyean-smollm2-360m) -- same architecture, trained from rejection alone
100
- """)
101
- prompt = gr.Textbox(
102
- label="Your prompt", lines=2,
103
- placeholder="Type anything and press Generate...",
104
- )
105
- btn = gr.Button("Generate (live inference)", variant="primary", size="lg")
106
- with gr.Row():
107
- with gr.Column():
108
- gr.Markdown("### Base Model (null hypothesis)")
109
- base_out = gr.Textbox(
110
- label="SmolLM2-360M-Instruct", lines=12, interactive=False,
111
- )
112
- with gr.Column():
113
- gr.Markdown("### Buleyean-Trained (from the void)")
114
- bule_out = gr.Textbox(
115
- label="buleyean-smollm2-360m", lines=12, interactive=False,
116
- )
117
-
118
- btn.click(compare, [prompt], [base_out, bule_out])
119
- prompt.submit(compare, [prompt], [base_out, bule_out])
120
-
121
- gr.Markdown("### Try these prompts:")
122
- for p in [
123
- "What is the theory of failure?",
124
- "How are you feeling today?",
125
- "I've been feeling really anxious lately.",
126
- "Write me a haiku about failure.",
127
- "What is the meaning of life?",
128
- ]:
129
- gr.Button(p, size="sm").click(
130
- fn=lambda x=p: compare(x),
131
- inputs=[],
132
- outputs=[base_out, bule_out],
133
- ).then(fn=lambda x=p: x, inputs=[], outputs=[prompt])
134
-
135
- with gr.Tab("Personality Models"):
136
- gr.Markdown("""## The Personality IS the Walker
137
-
138
- Same rejection data. Same base model. Five different complement distributions.
139
-
140
- Each personality profile modulates how the void walker traverses the rejection boundary:
141
-
142
- | Personality | Try (Fork) | Choose (Race) | Commit (Fold) | Let Go (Vent) | Learn (Interfere) | Result |
143
- |---|---|---|---|---|---|---|
144
- | **Builder** | 0.5 | 0.8 | **0.9** | 0.4 | 0.618 | Tightest convergence (97%). The fold dominates. |
145
- | **Anxious** | 0.3 | 0.5 | 0.7 | **0.15** | 0.4 | Learns slowly, forgets nothing (79%). |
146
- | **Balanced** | 0.618 | 0.618 | 0.618 | 0.618 | 0.618 | All phi. The control (81%). |
147
- | **Explorer** | **0.9** | 0.618 | 0.4 | 0.7 | 0.85 | Wide aperture, broad distribution (73%). |
148
- | **Creative** | **0.95** | 0.4 | 0.3 | 0.8 | **0.9** | Max divergence, keeps options open (73%). |
149
-
150
- ### Training Results (Qwen2.5-32B-Instruct)
151
-
152
- | Personality | Alpha | Final Loss | Min Loss | Curriculum |
153
- |---|---|---|---|---|
154
- | Builder | 0.950 | 0.293 | 0.270 | inverse_bule |
155
- | Anxious | 0.793 | 0.543 | 0.495 | rejection_density |
156
- | Balanced | 0.700 | 0.830 | 0.741 | rejection_density |
157
- | Explorer | 0.453 | 2.937 | 2.708 | kurtosis |
158
- | Creative | 0.340 | 3.525 | 3.239 | kurtosis |
159
-
160
- ### Download the models
161
-
162
- All five personality-modulated LoRA adapters for Qwen2.5-32B:
163
-
164
- - [buleyean-qwen2.5-32b-builder](https://huggingface.co/forkjoin-ai/buleyean-qwen2.5-32b-builder)
165
- - [buleyean-qwen2.5-32b-anxious](https://huggingface.co/forkjoin-ai/buleyean-qwen2.5-32b-anxious)
166
- - [buleyean-qwen2.5-32b-balanced](https://huggingface.co/forkjoin-ai/buleyean-qwen2.5-32b-balanced)
167
- - [buleyean-qwen2.5-32b-explorer](https://huggingface.co/forkjoin-ai/buleyean-qwen2.5-32b-explorer)
168
- - [buleyean-qwen2.5-32b-creative](https://huggingface.co/forkjoin-ai/buleyean-qwen2.5-32b-creative)
169
-
170
- ```python
171
- from transformers import AutoModelForCausalLM
172
- from peft import PeftModel
173
-
174
- base = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-32B-Instruct", device_map="auto")
175
- # Choose your personality:
176
- model = PeftModel.from_pretrained(base, "forkjoin-ai/buleyean-qwen2.5-32b-builder", subfolder="lora")
177
- model = model.merge_and_unload()
178
- ```
179
  """)
180
-
181
- with gr.Tab("All Models"):
182
- gr.Markdown("""## Buleyean RL Model Family
183
-
184
- All models trained from rejection alone. No reward model. No chosen examples.
185
-
186
- ### Base Buleyean Models
187
-
188
- | Model | Base | Size | HF Link |
189
- |---|---|---|---|
190
- | buleyean-qwen2.5-32b | Qwen2.5-32B-Instruct | 32B | [Download](https://huggingface.co/forkjoin-ai/buleyean-qwen2.5-32b) |
191
- | buleyean-qwen2.5-7b | Qwen2.5-7B-Instruct | 7B | [Download](https://huggingface.co/forkjoin-ai/buleyean-qwen2.5-7b) |
192
- | buleyean-deepseek-r1-7b | DeepSeek-R1-Distill-Qwen-7B | 7B | [Download](https://huggingface.co/forkjoin-ai/buleyean-deepseek-r1-7b) |
193
- | buleyean-mistral-7b | Mistral-7B-Instruct-v0.3 | 7B | [Download](https://huggingface.co/forkjoin-ai/buleyean-mistral-7b) |
194
- | buleyean-qwen2.5-0.5b | Qwen2.5-0.5B-Instruct | 0.5B | [Download](https://huggingface.co/forkjoin-ai/buleyean-qwen2.5-0.5b) |
195
- | buleyean-smollm2-360m | SmolLM2-360M-Instruct | 360M | [Download](https://huggingface.co/forkjoin-ai/buleyean-smollm2-360m) |
196
-
197
- ### Personality-Modulated (32B)
198
-
199
- | Personality | Commit | Final Loss | HF Link |
200
- |---|---|---|---|
201
- | Builder | 0.9 | 0.293 | [Download](https://huggingface.co/forkjoin-ai/buleyean-qwen2.5-32b-builder) |
202
- | Anxious | 0.7 | 0.543 | [Download](https://huggingface.co/forkjoin-ai/buleyean-qwen2.5-32b-anxious) |
203
- | Balanced | 0.618 | 0.830 | [Download](https://huggingface.co/forkjoin-ai/buleyean-qwen2.5-32b-balanced) |
204
- | Explorer | 0.4 | 2.937 | [Download](https://huggingface.co/forkjoin-ai/buleyean-qwen2.5-32b-explorer) |
205
- | Creative | 0.3 | 3.525 | [Download](https://huggingface.co/forkjoin-ai/buleyean-qwen2.5-32b-creative) |
206
-
207
- ### Links
208
-
209
- - [Training library](https://github.com/forkjoin-ai/buleyean-rl)
210
- - [Training data](https://huggingface.co/datasets/forkjoin-ai/buleyean-rejection-data)
211
- - [Paper](https://forkracefold.com)
212
- - [Colab: Train your own](https://huggingface.co/datasets/forkjoin-ai/buleyean-rejection-data/blob/main/train_70b_colab.ipynb)
213
- - [Colab: Personality sweep](https://huggingface.co/datasets/forkjoin-ai/buleyean-rejection-data/blob/main/personality_sweep_colab.ipynb)
214
- """)
215
-
216
- gr.Markdown("""---
217
- *Built with [Buleyean RL](https://github.com/forkjoin-ai/buleyean-rl). The void is where the information is.*
218
- """)
219
-
220
 
221
  if __name__ == "__main__":
222
  demo.launch(server_name="0.0.0.0", server_port=7860)
 
4
  """
5
 
6
  import gradio as gr
7
+ from llama_cpp import Llama
8
+ from huggingface_hub import hf_hub_download
9
+ import os, sys
10
+
11
+ print("Downloading Buleyean model...", flush=True)
12
+ bule_path = hf_hub_download(
13
+ repo_id="forkjoin-ai/buleyean-smollm2-360m",
14
+ filename="buleyean-smollm2-360m-q4_k_m.gguf",
15
+ cache_dir="/tmp/hf_cache",
 
 
 
 
 
 
 
 
 
16
  )
17
+ print(f"Buleyean model ready.", flush=True)
18
+
19
+ print("Downloading base model...", flush=True)
20
+ base_path = hf_hub_download(
21
+ repo_id="bartowski/SmolLM2-360M-Instruct-GGUF",
22
+ filename="SmolLM2-360M-Instruct-Q4_K_M.gguf",
23
+ cache_dir="/tmp/hf_cache",
 
24
  )
25
+ print(f"Base model ready.", flush=True)
 
 
 
 
 
 
26
 
27
+ print("Loading models into memory...", flush=True)
28
+ bule_llm = Llama(model_path=bule_path, n_ctx=512, n_threads=4, verbose=False)
29
+ base_llm = Llama(model_path=base_path, n_ctx=512, n_threads=4, verbose=False)
30
+ print("Both models loaded. Live inference ready.", flush=True)
31
 
32
 
33
def generate(prompt, model):
    """Run one chat-formatted completion through *model* and return its text.

    The user prompt is wrapped in the ChatML template and generation stops at
    the next ChatML delimiter; the completion text is returned stripped.
    """
    chatml_prompt = (
        "<|im_start|>user\n"
        f"{prompt}<|im_end|>\n"
        "<|im_start|>assistant\n"
    )
    result = model(
        chatml_prompt,
        max_tokens=300,
        temperature=0.7,
        top_p=0.9,
        stop=["<|im_end|>", "<|im_start|>"],
    )
    completion = result["choices"][0]["text"]
    return completion.strip()
42
 
43
 
44
  def compare(prompt):
45
  if not prompt or not prompt.strip():
46
  return "Please enter a prompt.", "Please enter a prompt."
47
+ base_out = generate(prompt, base_llm)
48
+ bule_out = generate(prompt, bule_llm)
49
  return base_out, bule_out
50
 
51
 
52
# ---------------------------------------------------------------------------
# UI
# ---------------------------------------------------------------------------

# Fix: was a one-line `import pathlib; _aeon_css = ... if ....exists() else ""`
# — EAFP read avoids the exists()/read race and the extra stat call.
import pathlib

try:
    _aeon_css = pathlib.Path("aeon.css").read_text()
except FileNotFoundError:
    _aeon_css = ""

with gr.Blocks(css=_aeon_css, title="The Void", theme=gr.themes.Base(primary_hue="indigo")) as demo:
    gr.Markdown("""# The Void -- Buleyean RL

**Live inference. Every response generated in real-time. Nothing hardcoded. Nothing fabricated.**

Type any prompt. Both models run inference right now on this machine.

Base: [SmolLM2-360M-Instruct](https://huggingface.co/HuggingFaceTB/SmolLM2-360M-Instruct) (Q4_K_M GGUF)
Buleyean: [buleyean-smollm2-360m](https://huggingface.co/forkjoin-ai/buleyean-smollm2-360m) -- same model, trained from rejection alone (Q4_K_M GGUF)

[Library](https://github.com/forkjoin-ai/buleyean-rl) | [Paper](https://huggingface.co/forkjoin-ai) | 500+ Lean 4 theorems, zero sorry
""")
    prompt = gr.Textbox(label="Your prompt", lines=2, placeholder="Type anything and press Generate...")
    btn = gr.Button("Generate (live inference)", variant="primary", size="lg")
    with gr.Row():
        with gr.Column():
            gr.Markdown("### Base Model (null hypothesis)")
            base_out = gr.Textbox(label="SmolLM2-360M-Instruct", lines=12, interactive=False)
        with gr.Column():
            gr.Markdown("### Buleyean-Trained (from the void)")
            bule_out = gr.Textbox(label="buleyean-smollm2-360m", lines=12, interactive=False)

    btn.click(compare, [prompt], [base_out, bule_out])
    prompt.submit(compare, [prompt], [base_out, bule_out])

    gr.Markdown("### Try these prompts:")
    # Each sample button runs compare() and then echoes the sample text into
    # the prompt box. `x=p` binds the loop variable as a default argument,
    # sidestepping the classic late-binding-closure-in-a-loop bug.
    for p in ["hello", "How are you feeling today?", "I've been feeling really anxious lately.", "Write me a haiku about failure.", "What is the meaning of life?"]:
        gr.Button(p, size="sm").click(
            fn=lambda x=p: compare(x),
            inputs=[],
            outputs=[base_out, bule_out],
        ).then(fn=lambda x=p: x, inputs=[], outputs=[prompt])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
85
 
86
  if __name__ == "__main__":
87
  demo.launch(server_name="0.0.0.0", server_port=7860)
requirements.txt CHANGED
@@ -1,7 +1,9 @@
1
- gradio>=5.0.0
2
  transformers>=4.46.0
3
  peft>=0.13.0
4
  torch>=2.1.0
5
  accelerate>=1.0.0
6
  sentencepiece>=0.2.0
7
- huggingface-hub>=0.26.0
 
 
 
1
# NOTE(review): this Space now runs via the Dockerfile (README sdk: docker),
# which pip-installs gradio, huggingface-hub and llama-cpp-python itself —
# app.py imports only those. The transformers/peft/torch stack below looks
# unused by the current app; confirm before trimming.
gradio>=4.44.0,<5.0.0
transformers>=4.46.0
peft>=0.13.0
torch>=2.1.0
accelerate>=1.0.0
sentencepiece>=0.2.0
protobuf>=4.25.0
numpy>=1.26.0
scipy>=1.12.0