0notexist0 committed on
Commit
9b5686a
Β·
verified Β·
1 Parent(s): 33517aa

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +167 -181
app.py CHANGED
@@ -1,187 +1,173 @@
1
- """ OpenRouter Chatbot – con storico conversazione + modelli separati + Telegram Run: python app.py """
2
-
3
- import os import functools import threading import requests import gradio as gr
4
-
5
- from telegram import Update from telegram.ext import ApplicationBuilder, CommandHandler, MessageHandler, filters, ContextTypes
6
-
7
- ------------------------------------------------------------------
8
-
9
- Configurazione
10
-
11
- ------------------------------------------------------------------
12
-
13
- OPENROUTER_API_KEY = os.getenv( "OPENROUTER_API_KEY", "sk-or-v1-***REDACTED-LEAKED-KEY***" ) TELEGRAM_BOT_TOKEN = "***REDACTED-LEAKED-TOKEN***"
14
-
15
- ------------------------------------------------------------------
16
-
17
- Ottieni modelli raggruppati
18
-
19
- ------------------------------------------------------------------
20
-
21
- @functools.lru_cache(maxsize=1) def fetch_models_grouped() -> dict: headers = {"Authorization": f"Bearer {OPENROUTER_API_KEY}"} try: resp = requests.get("https://openrouter.ai/api/v1/models", headers=headers, timeout=15) resp.raise_for_status() data = resp.json()
22
-
23
- reasoning_models = []
24
- casual_models = []
25
-
 
 
 
 
 
 
 
 
26
  for m in data["data"]:
27
- model_id = m["id"].lower()
28
- if any(key in model_id for key in ["gpt", "claude", "llama", "mistral", "mixtral", "gemini", "command-r", "qwen"]):
29
- reasoning_models.append(m["id"])
30
  else:
31
- casual_models.append(m["id"])
32
-
33
- return {
34
- "reasoning": sorted(reasoning_models),
35
- "casual": sorted(casual_models)
36
- }
37
- except Exception as e:
38
- return {
39
- "reasoning": ["openai/gpt-4-turbo"],
40
- "casual": []
41
- }
42
-
43
- ------------------------------------------------------------------
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
44
 
45
- Format cronologia
46
-
47
- ------------------------------------------------------------------
48
-
49
- def format_history(history: list) -> str: output = "" for msg in history: if msg["role"] == "user": output += f"\U0001f9d1\u200d\U0001f4bb Tu:\n{msg['content']}\n\n" elif msg["role"] == "assistant": output += f"\U0001f916 Assistente:\n{msg['content']}\n\n" return output.strip()
50
-
51
- ------------------------------------------------------------------
52
-
53
- Chat
54
-
55
- ------------------------------------------------------------------
56
-
57
- def chat_with_openrouter(prompt: str, selected_model: str, history: list): headers = { "Authorization": f"Bearer {OPENROUTER_API_KEY}", "Content-Type": "application/json" }
58
-
59
- history.append({"role": "user", "content": prompt})
60
-
61
- payload = {
62
- "model": selected_model,
63
- "messages": history,
64
- "max_tokens": 4096,
65
- "temperature": 0.7,
66
- }
67
-
68
- try:
69
- resp = requests.post("https://openrouter.ai/api/v1/chat/completions", headers=headers, json=payload, timeout=60)
70
- resp.raise_for_status()
71
- reply = resp.json()["choices"][0]["message"]["content"]
72
  history.append({"role": "assistant", "content": reply})
73
  return history, format_history(history)
74
- except Exception as e:
75
- error_msg = f"\u274c Errore: {e}"
76
- history.append({"role": "assistant", "content": error_msg})
77
- return history, format_history(history)
78
-
79
- ------------------------------------------------------------------
80
-
81
- Interfaccia Gradio
82
-
83
- ------------------------------------------------------------------
84
-
85
- def build_interface(): grouped_models = fetch_models_grouped() default_model = grouped_models["reasoning"][0] if grouped_models["reasoning"] else ""
86
-
87
- with gr.Blocks(title="NotExistChatter – Chat con storico") as demo:
88
- gr.Markdown("## \U0001f916 Project Adam – Chat dinamica con modelli open source")
89
-
90
- saved_chats = gr.State({})
91
- current_chat_name = gr.State("Chat #1")
92
-
93
- with gr.Row():
94
- with gr.Column(scale=1):
95
- gr.Markdown("### \U0001f4ac Chat salvate")
96
- chat_selector = gr.Radio(choices=[], label="Storico", interactive=True)
97
- load_btn = gr.Button("\U0001f4c2 Carica Chat")
98
- new_chat_btn = gr.Button("\U0001f195 Nuova Chat")
99
- save_name = gr.Textbox(label="\U0001f4be Nome", placeholder="Es. brainstorming")
100
- save_btn = gr.Button("\U0001f4be Salva Chat")
101
-
102
- with gr.Column(scale=4):
103
- with gr.Row():
104
- reasoning_dropdown = gr.Dropdown(choices=grouped_models["reasoning"], label="\U0001f9e0 Modelli con Ragionamento", interactive=True)
105
- casual_dropdown = gr.Dropdown(choices=grouped_models["casual"], label="\u26a1\ufe0f Modelli Generici", interactive=True)
106
-
107
- output_box = gr.Textbox(label="Conversazione", interactive=False, lines=20, max_lines=40)
108
- prompt_box = gr.Textbox(label="Prompt", placeholder="Scrivi qui il tuo messaggio...", lines=4, max_lines=10)
109
- send_btn = gr.Button("Invia", variant="primary")
110
- chat_history = gr.State([])
111
-
112
- def resolve_model(prompt, reasoning_model, casual_model, history):
113
- model = reasoning_model or casual_model
114
- if not model:
115
- history.append({"role": "assistant", "content": "\u26a0\ufe0f Seleziona almeno un modello."})
116
- return history, format_history(history)
117
- return chat_with_openrouter(prompt, model, history)
118
-
119
- def reset_chat():
120
- return [], "", f"Chat #{os.urandom(2).hex()}", gr.update(value=None)
121
-
122
- def save_chat_fn(chats, name, history):
123
- if not name:
124
- return chats, gr.update(choices=list(chats.keys())), "\u26a0\ufe0f Inserisci un nome valido."
125
- chats[name] = history.copy()
126
- return chats, gr.update(choices=list(chats.keys())), f"\u2705 Chat salvata: '{name}'"
127
-
128
- def load_chat_fn(chats, selected_name):
129
- if not selected_name or selected_name not in chats:
130
- return [], "", "\u26a0\ufe0f Seleziona una chat valida."
131
- history = chats[selected_name]
132
- return history, format_history(history), selected_name
133
-
134
- send_btn.click(fn=resolve_model, inputs=[prompt_box, reasoning_dropdown, casual_dropdown, chat_history], outputs=[chat_history, output_box])
135
- prompt_box.submit(fn=resolve_model, inputs=[prompt_box, reasoning_dropdown, casual_dropdown, chat_history], outputs=[chat_history, output_box])
136
- new_chat_btn.click(fn=reset_chat, inputs=[], outputs=[chat_history, output_box, current_chat_name, chat_selector])
137
- save_btn.click(fn=save_chat_fn, inputs=[saved_chats, save_name, chat_history], outputs=[saved_chats, chat_selector, output_box])
138
- load_btn.click(fn=load_chat_fn, inputs=[saved_chats, chat_selector], outputs=[chat_history, output_box, current_chat_name])
139
-
140
- return demo
141
-
142
- ------------------------------------------------------------------
143
-
144
- Bot Telegram
145
-
146
- ------------------------------------------------------------------
147
-
148
- telegram_chat_states = {}
149
-
150
- async def start(update: Update, context: ContextTypes.DEFAULT_TYPE): chat_id = update.effective_chat.id telegram_chat_states[chat_id] = {"history": [], "model": fetch_models_grouped()["reasoning"][0]} await update.message.reply_text("\U0001f916 Benvenuto! Scrivimi un messaggio per iniziare a chattare.")
151
-
152
- async def setmodel(update: Update, context: ContextTypes.DEFAULT_TYPE): chat_id = update.effective_chat.id args = context.args available_models = fetch_models_grouped() all_models = available_models["reasoning"] + available_models["casual"]
153
-
154
- if not args:
155
- await update.message.reply_text("\U0001f4cc Specifica il modello. Es: /setmodel openai/gpt-4-turbo")
156
- return
157
-
158
- selected_model = args[0]
159
- if selected_model not in all_models:
160
- await update.message.reply_text("❌ Modello non valido. Scegli tra:\n" + "\n".join(all_models))
161
- return
162
-
163
- telegram_chat_states.setdefault(chat_id, {"history": [], "model": selected_model})
164
- telegram_chat_states[chat_id]["model"] = selected_model
165
- await update.message.reply_text(f"\u2705 Modello impostato: {selected_model}")
166
-
167
- async def handle_message(update: Update, context: ContextTypes.DEFAULT_TYPE): chat_id = update.effective_chat.id user_input = update.message.text
168
-
169
- if chat_id not in telegram_chat_states:
170
- telegram_chat_states[chat_id] = {"history": [], "model": fetch_models_grouped()["reasoning"][0]}
171
-
172
- state = telegram_chat_states[chat_id]
173
- history, _ = chat_with_openrouter(user_input, state["model"], state["history"])
174
- reply = history[-1]["content"]
175
- telegram_chat_states[chat_id]["history"] = history
176
- await update.message.reply_text(reply)
177
-
178
- def run_telegram_bot(): app = ApplicationBuilder().token(TELEGRAM_BOT_TOKEN).build() app.add_handler(CommandHandler("start", start)) app.add_handler(CommandHandler("setmodel", setmodel)) app.add_handler(MessageHandler(filters.TEXT & ~filters.COMMAND, handle_message)) print("\U0001f916 Telegram bot in esecuzione...") app.run_polling()
179
-
180
- ------------------------------------------------------------------
181
-
182
- Avvio
183
-
184
- ------------------------------------------------------------------
185
-
186
- if name == "main": threading.Thread(target=run_telegram_bot, daemon=True).start() build_interface().launch()
187
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # app.py
2
+ # Hugging Face Space version
3
+ # Runs Gradio in the foreground, Telegram in a background thread
4
+ # -----------------------------------------------------------------
5
+
6
+ import os
7
+ import functools
8
+ import threading
9
+ import requests
10
+ import gradio as gr
11
+ from telegram import Update
12
+ from telegram.ext import ApplicationBuilder, CommandHandler, MessageHandler, filters, ContextTypes
13
+
14
# -----------------------------------------------------------------
# 1. Secrets (Hugging Face -> Settings -> Repository secrets)
# -----------------------------------------------------------------
# NOTE: os.getenv returns None when a variable is unset. Downstream code
# must tolerate a missing OpenRouter key (requests will fail and the model
# list falls back to a hard-coded default).
OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")  # required for OpenRouter calls
TELEGRAM_BOT_TOKEN = os.getenv("TELEGRAM_BOT_TOKEN")  # optional: enables the Telegram bot
19
+
20
# -----------------------------------------------------------------
# 2. Fetch & group models
# -----------------------------------------------------------------

# Substrings that mark a model id as a "reasoning" model.
_REASONING_KEYS = ("gpt", "claude", "llama", "mistral", "gemini", "command", "qwen")


@functools.lru_cache(maxsize=1)
def fetch_models_grouped():
    """Fetch the OpenRouter model list and split it into two groups.

    Returns:
        dict: {"reasoning": [...], "casual": [...]} with sorted model ids.
              On any network/HTTP/parse error a minimal hard-coded fallback
              is returned so the UI can still start.
    """
    headers = {"Authorization": f"Bearer {OPENROUTER_API_KEY}"}
    try:
        resp = requests.get("https://openrouter.ai/api/v1/models", headers=headers, timeout=15)
        resp.raise_for_status()
        data = resp.json()
    except Exception:
        # Offline / bad key / API change: degrade gracefully instead of crashing.
        return {"reasoning": ["openai/gpt-4-turbo"], "casual": []}

    reasoning, casual = [], []
    # BUGFIX: access the payload defensively. The loop runs outside the try
    # block above, so a response without "data" (or entries without "id")
    # used to raise instead of reaching the fallback.
    for entry in data.get("data", []):
        model_id = entry.get("id", "")
        if any(key in model_id.lower() for key in _REASONING_KEYS):
            reasoning.append(model_id)
        else:
            casual.append(model_id)
    return {"reasoning": sorted(reasoning), "casual": sorted(casual)}
41
+
42
# -----------------------------------------------------------------
# 3. Format helpers
# -----------------------------------------------------------------
def format_history(history):
    """Render a message history as a plain-text transcript.

    Each entry becomes "<speaker>:\\n<content>", where the speaker label is
    "Tu" for user messages and "Assistente" for everything else; entries
    are separated by a blank line and the result is stripped.
    """
    def render(msg):
        speaker = "Tu" if msg["role"] == "user" else "Assistente"
        return f"{speaker}:\n{msg['content']}\n"

    return "\n".join(render(m) for m in history).strip()
51
+
52
# -----------------------------------------------------------------
# 4. Chat core
# -----------------------------------------------------------------
def chat_with_openrouter(prompt: str, model: str, history: list):
    """Send *prompt* to OpenRouter and append the exchange to *history*.

    Args:
        prompt: user message; blank/whitespace-only prompts are ignored.
        model: OpenRouter model id; if falsy, a warning is appended instead.
        history: mutable list of {"role", "content"} dicts (mutated in place).

    Returns:
        (history, transcript): the updated history and its formatted text.
    """
    if not model:
        history.append({"role": "assistant", "content": "⚠️ Seleziona un modello prima di inviare."})
        return history, format_history(history)

    # BUGFIX: ignore empty submissions instead of appending a blank user
    # message and burning an API call on it.
    if not prompt or not prompt.strip():
        return history, format_history(history)

    history.append({"role": "user", "content": prompt})
    payload = {"model": model, "messages": history, "max_tokens": 4096, "temperature": 0.7}
    headers = {"Authorization": f"Bearer {OPENROUTER_API_KEY}", "Content-Type": "application/json"}

    try:
        r = requests.post("https://openrouter.ai/api/v1/chat/completions",
                          headers=headers, json=payload, timeout=60)
        r.raise_for_status()
        reply = r.json()["choices"][0]["message"]["content"]
    except Exception as e:
        # Surface the failure in-band so both the UI and the Telegram bot
        # show the user what went wrong.
        reply = f"❌ Errore: {e}"

    history.append({"role": "assistant", "content": reply})
    return history, format_history(history)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
73
 
74
# -----------------------------------------------------------------
# 5. Gradio interface
# -----------------------------------------------------------------
def build_interface():
    """Build the Gradio Blocks UI: model pickers, chat box, saved chats.

    Returns:
        gr.Blocks: the assembled (not yet launched) demo.
    """
    grouped = fetch_models_grouped()
    default_model = grouped["reasoning"][0] if grouped["reasoning"] else ""

    with gr.Blocks(title="OpenRouter Chat – HF Space") as demo:
        gr.Markdown("## 🤖 OpenRouter Chatbot (HF Space edition)")
        saved_chats = gr.State({})      # name -> history list
        current_name = gr.State("Chat #1")
        history = gr.State([])          # live conversation

        with gr.Row():
            with gr.Column(scale=1):
                gr.Markdown("### 💬 Storico")
                chat_selector = gr.Radio(choices=[], label="Salvate")
                load_btn = gr.Button("📂 Carica")
                new_btn = gr.Button("🆕 Nuova")
                save_name = gr.Textbox(label="Nome", placeholder="es. brainstorming")
                save_btn = gr.Button("💾 Salva")

            with gr.Column(scale=4):
                reasoning_dd = gr.Dropdown(choices=grouped["reasoning"], label="🧠 Reasoning", value=default_model)
                casual_dd = gr.Dropdown(choices=grouped["casual"], label="⚡ Casual")
                output = gr.Textbox(label="Conversazione", lines=20, interactive=False)
                prompt = gr.Textbox(label="Prompt", lines=3, placeholder="Scrivi qui...")

        # --- event handlers ---------------------------------------------
        def resolve(prompt, r, c, hist):
            # Reasoning model wins when both dropdowns have a value.
            return chat_with_openrouter(prompt, r or c, hist)

        def new_chat():
            # BUGFIX: deselect the radio instead of wiping its choices —
            # gr.update(choices=[]) used to hide every saved chat even
            # though they were still stored in saved_chats.
            return [], "", f"Chat #{os.urandom(2).hex()}", gr.update(value=None)

        def save_fn(chats, name, hist):
            if not name:
                return chats, gr.update(), "⚠️ Inserisci un nome"
            chats[name] = hist.copy()
            return chats, gr.update(choices=list(chats.keys())), f"✅ Salvata '{name}'"

        def load_fn(chats, sel, cur):
            # BUGFIX: show the warning in the conversation box and keep the
            # current chat name — previously the warning string was written
            # into current_name (invisible) while the output stayed blank.
            if sel in chats:
                return chats[sel], format_history(chats[sel]), sel
            return [], "⚠️ Seleziona chat", cur

        # --- wiring -------------------------------------------------------
        prompt.submit(fn=resolve, inputs=[prompt, reasoning_dd, casual_dd, history], outputs=[history, output])
        save_btn.click(fn=save_fn, inputs=[saved_chats, save_name, history], outputs=[saved_chats, chat_selector, output])
        load_btn.click(fn=load_fn, inputs=[saved_chats, chat_selector, current_name], outputs=[history, output, current_name])
        new_btn.click(fn=new_chat, outputs=[history, output, current_name, chat_selector])

    return demo
126
+
127
# -----------------------------------------------------------------
# 6. Optional Telegram bot (background)
# -----------------------------------------------------------------
# Per-chat state: chat_id -> {"history": [...], "model": "<model id>"}.
# NOTE(review): assumes handlers run sequentially (python-telegram-bot's
# default blocking dispatch) — confirm before making handlers concurrent,
# since this dict is not lock-protected.
telegram_states = {}
131
+
132
async def tg_start(update: Update, ctx):
    """Handle /start: reset this chat's state to a fresh default."""
    default_model = fetch_models_grouped()["reasoning"][0]
    telegram_states[update.effective_chat.id] = {"history": [], "model": default_model}
    await update.message.reply_text("👋 Benvenuto! Scrivimi pure.")
136
+
137
async def tg_setmodel(update: Update, ctx):
    """Handle /setmodel <id>: switch this chat's model after validating it."""
    chat_id = update.effective_chat.id
    groups = fetch_models_grouped()  # fetch once instead of twice
    all_models = groups["reasoning"] + groups["casual"]
    if not ctx.args or ctx.args[0] not in all_models:
        await update.message.reply_text("❌ Modello non valido.\nEsempio: /setmodel openai/gpt-4-turbo")
        return
    # BUGFIX: seed a complete state. setdefault(chat_id, {}) used to create
    # a dict with no "history" key, so the next text message crashed with a
    # KeyError in the message handler.
    state = telegram_states.setdefault(chat_id, {"history": [], "model": ctx.args[0]})
    state["model"] = ctx.args[0]
    await update.message.reply_text(f"✅ Modello: {ctx.args[0]}")
145
+
146
async def tg_msg(update: Update, ctx):
    """Handle a plain text message: run it through the chat core and reply."""
    chat_id = update.effective_chat.id
    user_input = update.message.text
    state = telegram_states.setdefault(
        chat_id, {"history": [], "model": fetch_models_grouped()["reasoning"][0]}
    )
    # Defensive default: a state created by /setmodel before any message
    # may lack the "history" key.
    state.setdefault("history", [])
    hist, _ = chat_with_openrouter(user_input, state["model"], state["history"])
    state["history"] = hist
    # hist should always end with an assistant entry; guard just in case.
    reply = hist[-1]["content"] if hist else "⚠️ Nessuna risposta."
    await update.message.reply_text(reply)
154
+
155
def run_telegram():
    """Start the Telegram bot with long polling; no-op when no token is set."""
    if not TELEGRAM_BOT_TOKEN:
        return

    bot_app = ApplicationBuilder().token(TELEGRAM_BOT_TOKEN).build()
    handlers = (
        CommandHandler("start", tg_start),
        CommandHandler("setmodel", tg_setmodel),
        MessageHandler(filters.TEXT & ~filters.COMMAND, tg_msg),
    )
    for handler in handlers:
        bot_app.add_handler(handler)
    print("🤖 Telegram bot in background...")
    # stop_signals=None: signal handlers can only be installed from the main
    # thread, and this runs in a background daemon thread.
    bot_app.run_polling(stop_signals=None)
164
+
165
# -----------------------------------------------------------------
# 7. Entry-point
# -----------------------------------------------------------------
if __name__ == "__main__":
    # Telegram thread (optional): daemon=True so the polling thread dies
    # with the main Gradio process instead of blocking shutdown.
    if TELEGRAM_BOT_TOKEN:
        threading.Thread(target=run_telegram, daemon=True).start()
    # Launch Gradio in the foreground (HF Space); queue() enables request
    # queuing for concurrent users.
    build_interface().queue().launch()