0notexist0 committed on
Commit
ef9f0e5
·
verified ·
1 Parent(s): 582c581

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +86 -233
app.py CHANGED
@@ -1,268 +1,121 @@
1
- # ---------------------------------------------------------------
2
- # app.py
3
- # Hugging Face Space – OpenRouter Chat (Gradio + Telegram)
4
- # ---------------------------------------------------------------
 
5
  import os
6
  import functools
7
- import threading
8
  import requests
9
  import gradio as gr
10
- from telegram import (
11
- Update,
12
- InlineKeyboardButton,
13
- InlineKeyboardMarkup,
14
- )
15
- from telegram.ext import (
16
- ApplicationBuilder,
17
- CommandHandler,
18
- MessageHandler,
19
- filters,
20
- ContextTypes,
21
- CallbackQueryHandler,
22
- )
23
 
24
- # ----------------------------------------------------------------
25
- # 1. Secrets (set them in HF Space → Settings → Repository secrets)
26
- # ----------------------------------------------------------------
27
- OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")
28
- TELEGRAM_BOT_TOKEN = os.getenv("TELEGRAM_BOT_TOKEN")
 
 
29
 
30
- # ----------------------------------------------------------------
31
- # 2. Fetch & group models (cached at start-up)
32
- # ----------------------------------------------------------------
33
  @functools.lru_cache(maxsize=1)
34
- def fetch_models_grouped():
 
 
 
 
 
35
  headers = {"Authorization": f"Bearer {OPENROUTER_API_KEY}"}
36
  try:
37
- resp = requests.get("https://openrouter.ai/api/v1/models", headers=headers, timeout=15)
 
 
 
 
38
  resp.raise_for_status()
39
  data = resp.json()
 
 
40
  except Exception as e:
41
- print("⚠️ Could not fetch models – using fallbacks:", e)
42
- return {
43
- "reasoning": ["openai/gpt-4-turbo"],
44
- "casual": ["meta-llama/llama-3.2-3b-instruct"],
45
- }
46
-
47
- reasoning, casual = [], []
48
- for m in data["data"]:
49
- mid = m["id"].lower()
50
- if any(k in mid for k in ["gpt", "claude", "gemini", "qwen", "mistral", "llama"]):
51
- reasoning.append(m["id"])
52
- else:
53
- casual.append(m["id"])
54
- return {"reasoning": sorted(reasoning), "casual": sorted(casual)}
55
-
56
- # ----------------------------------------------------------------
57
- # 3. Format helpers
58
- # ----------------------------------------------------------------
59
- def format_history(history):
60
- lines = []
61
- for msg in history:
62
- role = "Tu" if msg["role"] == "user" else "Assistente"
63
- lines.append(f"{role}:\n{msg['content']}\n")
64
- return "\n".join(lines).strip()
65
 
66
- # ----------------------------------------------------------------
67
- # 4. Core chat engine
68
- # ----------------------------------------------------------------
69
- def chat_with_openrouter(prompt: str, model: str, history: list):
70
- if not model:
71
- history.append({"role": "assistant", "content": "⚠️ Seleziona un modello prima di inviare."})
72
- return history, format_history(history)
73
-
74
- history.append({"role": "user", "content": prompt})
75
- payload = {"model": model, "messages": history, "max_tokens": 4096, "temperature": 0.7}
76
  headers = {
77
  "Authorization": f"Bearer {OPENROUTER_API_KEY}",
78
- "Content-Type": "application/json",
 
 
 
 
 
 
 
79
  }
80
 
81
  try:
82
- r = requests.post(
83
  "https://openrouter.ai/api/v1/chat/completions",
84
  headers=headers,
85
  json=payload,
86
- timeout=60,
87
  )
88
- r.raise_for_status()
89
- reply = r.json()["choices"][0]["message"]["content"]
90
  except Exception as e:
91
- reply = f"❌ Errore: {e}"
92
-
93
- history.append({"role": "assistant", "content": reply})
94
- return history, format_history(history)
95
 
96
- # ----------------------------------------------------------------
97
- # 5. Gradio interface
98
- # ----------------------------------------------------------------
99
  def build_interface():
100
- grouped = fetch_models_grouped()
101
- default_model = grouped["reasoning"][0] if grouped["reasoning"] else ""
102
-
103
- with gr.Blocks(title="NOT EXIST Chat – Space") as demo:
104
- gr.Markdown("## 🤖 Open Source models Chatbot (HF Space edition)")
105
-
106
- saved_chats = gr.State({})
107
- current_name = gr.State("Chat #1")
108
- history = gr.State([])
109
 
 
 
 
110
  with gr.Row():
111
- with gr.Column(scale=1):
112
- gr.Markdown("### 💬 Storico")
113
- chat_selector = gr.Radio(choices=[], label="Salvate")
114
- load_btn = gr.Button("📂 Carica")
115
- new_btn = gr.Button("🆕 Nuova")
116
- save_name = gr.Textbox(label="Nome", placeholder="es. brainstorming")
117
- save_btn = gr.Button("💾 Salva")
118
-
119
- with gr.Column(scale=4):
120
- reasoning_dd = gr.Dropdown(
121
- choices=grouped["reasoning"], label="🧠 Reasoning", value=default_model
122
- )
123
- casual_dd = gr.Dropdown(
124
- choices=grouped["casual"], label="⚡ Casual"
125
- )
126
- output = gr.Textbox(label="Conversazione", lines=20, interactive=False)
127
- prompt = gr.Textbox(label="Prompt", lines=3, placeholder="Scrivi qui...")
128
- send_btn = gr.Button("📤 Invia") # <--- ADDED
129
-
130
- # --- wiring -------------------------------------------------
131
- def resolve(prompt, r, c, hist):
132
- model = r or c
133
- return chat_with_openrouter(prompt, model, hist)
134
-
135
- def new_chat():
136
- return [], "", f"Chat #{os.urandom(2).hex()}", gr.update(choices=[])
137
-
138
- def save_fn(chats, name, hist):
139
- if not name:
140
- return chats, gr.update(), "⚠️ Inserisci un nome"
141
- chats[name] = hist.copy()
142
- return chats, gr.update(choices=list(chats.keys())), f"✅ Salvata '{name}'"
143
-
144
- def load_fn(chats, sel):
145
- return (chats[sel], format_history(chats[sel]), sel) if sel in chats else ([], "", "⚠️ Seleziona chat")
146
-
147
- # Gradio events
148
- send_btn.click( # <--- NEW EVENT
149
- fn=resolve,
150
- inputs=[prompt, reasoning_dd, casual_dd, history],
151
- outputs=[history, output],
152
- queue=False,
153
- )
154
- prompt.submit(
155
- fn=resolve,
156
- inputs=[prompt, reasoning_dd, casual_dd, history],
157
- outputs=[history, output],
158
- queue=False,
159
- )
160
- save_btn.click(
161
- fn=save_fn,
162
- inputs=[saved_chats, save_name, history],
163
- outputs=[saved_chats, chat_selector, output],
164
- queue=False,
165
- )
166
- load_btn.click(
167
- fn=load_fn,
168
- inputs=[saved_chats, chat_selector],
169
- outputs=[history, output, current_name],
170
- queue=False,
171
  )
172
- new_btn.click(
173
- fn=new_chat,
174
- outputs=[history, output, current_name, chat_selector],
175
- queue=False,
176
- )
177
-
178
- return demo
179
-
180
- # ----------------------------------------------------------------
181
- # 6. Telegram bot (background)
182
- # ----------------------------------------------------------------
183
- telegram_states = {}
184
 
185
- def tg_model_menu():
186
- grouped = fetch_models_grouped()
187
- buttons = [
188
- [InlineKeyboardButton("🧠 Reasoning", callback_data="reasoning")],
189
- *[InlineKeyboardButton(m[:40], callback_data=f"model:{m}")
190
- for m in grouped["reasoning"][:6]],
191
- [InlineKeyboardButton("⚡ Casual", callback_data="casual")],
192
- *[InlineKeyboardButton(m[:40], callback_data=f"model:{m}")
193
- for m in grouped["casual"][:6]],
194
- ]
195
- return InlineKeyboardMarkup(buttons)
196
-
197
- async def tg_start(update: Update, ctx: ContextTypes.DEFAULT_TYPE):
198
- chat_id = update.effective_chat.id
199
- telegram_states[chat_id] = {
200
- "history": [],
201
- "model": fetch_models_grouped()["reasoning"][0],
202
- }
203
- await update.message.reply_text("👋 Benvenuto! Scrivimi pure.")
204
-
205
- async def tg_menu(update: Update, ctx: ContextTypes.DEFAULT_TYPE):
206
- await update.message.reply_text("Scegli un modello:", reply_markup=tg_model_menu())
207
-
208
- async def tg_model_button(update: Update, ctx: ContextTypes.DEFAULT_TYPE):
209
- query = update.callback_query
210
- await query.answer()
211
- data = query.data
212
- chat_id = query.message.chat_id
213
-
214
- grouped = fetch_models_grouped()
215
- all_models = grouped["reasoning"] + grouped["casual"]
216
-
217
- if data == "reasoning":
218
- await query.edit_message_text(
219
- "Seleziona un *reasoning* modello:",
220
- reply_markup=InlineKeyboardMarkup(
221
- [[InlineKeyboardButton(m[:40], callback_data=f"model:{m}")] for m in grouped["reasoning"][:6]]
222
- ),
223
- parse_mode="Markdown",
224
  )
225
- elif data == "casual":
226
- await query.edit_message_text(
227
- "Seleziona un *casual* modello:",
228
- reply_markup=InlineKeyboardMarkup(
229
- [[InlineKeyboardButton(m[:40], callback_data=f"model:{m}")] for m in grouped["casual"][:6]]
230
- ),
231
- parse_mode="Markdown",
232
  )
233
- elif data.startswith("model:"):
234
- model = data[6:]
235
- telegram_states.setdefault(chat_id, {})["model"] = model
236
- await query.edit_message_text(f"✅ Modello impostato su: *{model}*", parse_mode="Markdown")
237
 
238
- async def tg_msg(update: Update, ctx: ContextTypes.DEFAULT_TYPE):
239
- chat_id = update.effective_chat.id
240
- user_input = update.message.text
241
- state = telegram_states.setdefault(
242
- chat_id, {"history": [], "model": fetch_models_grouped()["reasoning"][0]}
243
- )
244
- hist, _ = chat_with_openrouter(user_input, state["model"], state["history"])
245
- reply = hist[-1]["content"]
246
- state["history"] = hist
247
- await update.message.reply_text(reply)
248
 
249
- def run_telegram():
250
- if not TELEGRAM_BOT_TOKEN:
251
- print("🤖 TELEGRAM_BOT_TOKEN non trovato – bot Telegram OFF.")
252
- return
253
- app = ApplicationBuilder().token(TELEGRAM_BOT_TOKEN).build()
254
- app.add_handler(CommandHandler("start", tg_start))
255
- app.add_handler(CommandHandler("menu", tg_menu))
256
- app.add_handler(CallbackQueryHandler(tg_model_button))
257
- app.add_handler(MessageHandler(filters.TEXT & ~filters.COMMAND, tg_msg))
258
- print("🤖 Telegram bot in background...")
259
- app.run_polling(stop_signals=None)
260
 
261
- # ----------------------------------------------------------------
262
- # 7. Entry-point
263
- # ----------------------------------------------------------------
264
  if __name__ == "__main__":
265
- if TELEGRAM_BOT_TOKEN:
266
- threading.Thread(target=run_telegram, daemon=True).start()
267
- demo = build_interface()
268
- demo.queue().launch()
 
1
+ """
2
+ OpenRouter Chatbot – elenco dinamico completo
3
+ Run: gradio app.py
4
+ """
5
+
6
  import os
7
  import functools
 
8
  import requests
9
  import gradio as gr
 
 
 
 
 
 
 
 
 
 
 
 
 
10
 
11
# ------------------------------------------------------------------
# Configuration
# ------------------------------------------------------------------
# Read the OpenRouter key from the environment only. The previous
# revision shipped a literal API key as the fallback value, which
# leaks a live credential in source control — never do that. An empty
# string keeps the app importable; requests will simply fail with an
# auth error until the secret is configured (HF Space → Settings →
# Repository secrets).
OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY", "")
18
 
19
+ # ------------------------------------------------------------------
20
+ # Utility per ottenere l’elenco modelli
21
+ # ------------------------------------------------------------------
@functools.lru_cache(maxsize=1)
def fetch_models() -> list[str]:
    """Return the complete, sorted list of model ids offered by OpenRouter.

    The result is cached by ``lru_cache`` for the lifetime of the
    process, so the endpoint is hit at most once per container start.
    On any failure a one-element static fallback list is returned and a
    Gradio warning is surfaced instead of raising.
    """
    auth_headers = {"Authorization": f"Bearer {OPENROUTER_API_KEY}"}
    try:
        response = requests.get(
            "https://openrouter.ai/api/v1/models",
            headers=auth_headers,
            timeout=15,
        )
        response.raise_for_status()
        payload = response.json()
        # Every catalogue entry carries an "id" field; sort for a stable menu.
        return sorted(entry["id"] for entry in payload["data"])
    except Exception as exc:
        gr.Warning(f"Impossibile scaricare l’elenco modelli: {exc}")
        return ["openai/gpt-4-turbo"]  # static fallback
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
43
 
44
# ------------------------------------------------------------------
# Model invocation
# ------------------------------------------------------------------
def chat_with_openrouter(prompt: str, model: str):
    """Send a single-turn *prompt* to *model* via OpenRouter and return the reply text.

    No conversation history is kept — each call is an independent,
    one-message chat completion. On any failure (network error, HTTP
    error status, malformed response body) a human-readable error
    string is returned instead of raising, so the Gradio UI always has
    something to display.
    """
    request_headers = {
        "Authorization": f"Bearer {OPENROUTER_API_KEY}",
        "Content-Type": "application/json"
    }

    body = {
        "model": model,
        "messages": [{"role": "user", "content": prompt}],
        "max_tokens": 4096,
        "temperature": 0.7,
    }

    try:
        response = requests.post(
            "https://openrouter.ai/api/v1/chat/completions",
            headers=request_headers,
            json=body,
            timeout=60,
        )
        response.raise_for_status()
        return response.json()["choices"][0]["message"]["content"]
    except Exception as exc:
        return f"❌ Errore: {exc}"
 
 
 
71
 
72
# ------------------------------------------------------------------
# Gradio interface
# ------------------------------------------------------------------
def build_interface():
    """Assemble and return the Gradio Blocks app.

    Layout: a dropdown listing every model from :func:`fetch_models`,
    a prompt textbox, a read-only answer textbox, and a send button.
    Both the button click and Enter in the prompt box invoke
    :func:`chat_with_openrouter`.
    """
    available = fetch_models()

    with gr.Blocks(title="NotExistChatter – Tutti i modelli") as demo:
        gr.Markdown("🤖project Adam🤖")
        gr.Markdown("Il menù mostra **tutti** i modelli disponibili.")
        with gr.Row():
            model_dropdown = gr.Dropdown(
                choices=available,
                value=available[0] if available else None,
                label="Modello",
                allow_custom_value=False,
                interactive=True
            )

        prompt_box = gr.Textbox(
            label="Prompt",
            placeholder="Scrivi qui il tuo messaggio...",
            lines=4,
            max_lines=10
        )

        output_box = gr.Textbox(
            label="Risposta",
            interactive=False,
            lines=15,
            max_lines=20
        )

        send_btn = gr.Button("Invia", variant="primary")

        # Button click and textbox submit share identical wiring, so
        # register them in one pass.
        for trigger in (send_btn.click, prompt_box.submit):
            trigger(
                fn=chat_with_openrouter,
                inputs=[prompt_box, model_dropdown],
                outputs=output_box
            )

    return demo
 
 
 
 
 
 
 
 
 
 
118
 
119
+ # Lancia l’interfaccia
 
 
120
  if __name__ == "__main__":
121
+ build_interface().launch()