0notexist0 committed on
Commit
33517aa
Β·
verified Β·
1 Parent(s): 3a1a296

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +183 -209
app.py CHANGED
@@ -1,213 +1,187 @@
1
- """
2
- OpenRouter Chatbot – con storico conversazione + modelli separati
3
- Run: gradio app.py
4
- """
5
-
6
- import os
7
- import functools
8
- import requests
9
- import gradio as gr
10
-
11
- # ------------------------------------------------------------------
12
- # Configurazione
13
- # ------------------------------------------------------------------
14
- OPENROUTER_API_KEY = os.getenv(
15
- "OPENROUTER_API_KEY",
16
- "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"
17
- )
18
-
19
- # ------------------------------------------------------------------
20
- # Ottieni modelli raggruppati
21
- # ------------------------------------------------------------------
22
- @functools.lru_cache(maxsize=1)
23
- def fetch_models_grouped() -> dict:
24
- headers = {"Authorization": f"Bearer {OPENROUTER_API_KEY}"}
25
- try:
26
- resp = requests.get("https://openrouter.ai/api/v1/models", headers=headers, timeout=15)
27
- resp.raise_for_status()
28
- data = resp.json()
29
-
30
- reasoning_models = []
31
- casual_models = []
32
-
33
- for m in data["data"]:
34
- model_id = m["id"].lower()
35
- if any(key in model_id for key in ["gpt", "claude", "llama", "mistral", "mixtral", "gemini", "command-r", "qwen"]):
36
- reasoning_models.append(m["id"])
37
- else:
38
- casual_models.append(m["id"])
39
-
40
- return {
41
- "reasoning": sorted(reasoning_models),
42
- "casual": sorted(casual_models)
43
- }
44
- except Exception as e:
45
- return {
46
- "reasoning": ["openai/gpt-4-turbo"],
47
- "casual": []
48
- }
49
-
50
- # ------------------------------------------------------------------
51
- # Format cronologia
52
- # ------------------------------------------------------------------
53
- def format_history(history: list) -> str:
54
- output = ""
55
- for msg in history:
56
- if msg["role"] == "user":
57
- output += f"πŸ§‘β€πŸ’» **Tu**:\n{msg['content']}\n\n"
58
- elif msg["role"] == "assistant":
59
- output += f"πŸ€– **Assistente**:\n{msg['content']}\n\n"
60
- return output.strip()
61
-
62
- # ------------------------------------------------------------------
63
- # Chat
64
- # ------------------------------------------------------------------
65
- def chat_with_openrouter(prompt: str, selected_model: str, history: list):
66
- headers = {
67
- "Authorization": f"Bearer {OPENROUTER_API_KEY}",
68
- "Content-Type": "application/json"
69
- }
70
 
71
- history.append({"role": "user", "content": prompt})
72
 
73
- payload = {
74
- "model": selected_model,
75
- "messages": history,
76
- "max_tokens": 4096,
77
- "temperature": 0.7,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
78
  }
79
 
80
- try:
81
- resp = requests.post("https://openrouter.ai/api/v1/chat/completions", headers=headers, json=payload, timeout=60)
82
- resp.raise_for_status()
83
- reply = resp.json()["choices"][0]["message"]["content"]
84
- history.append({"role": "assistant", "content": reply})
85
- return history, format_history(history)
86
- except Exception as e:
87
- error_msg = f"❌ Errore: {e}"
88
- history.append({"role": "assistant", "content": error_msg})
89
- return history, format_history(history)
90
-
91
- # ------------------------------------------------------------------
92
- # Interfaccia utente
93
- # ------------------------------------------------------------------
94
- def build_interface():
95
- grouped_models = fetch_models_grouped()
96
- default_model = grouped_models["reasoning"][0] if grouped_models["reasoning"] else ""
97
-
98
- with gr.Blocks(title="NotExistChatter – Chat con storico") as demo:
99
- gr.Markdown("## πŸ€– Project Adam – Chat dinamica con modelli open source")
100
-
101
- saved_chats = gr.State({})
102
- current_chat_name = gr.State("Chat #1")
103
-
104
- with gr.Row():
105
- # Sidebar sinistra
106
- with gr.Column(scale=1):
107
- gr.Markdown("### πŸ’¬ Chat salvate")
108
-
109
- chat_selector = gr.Radio(
110
- choices=[], label="Storico", interactive=True
111
- )
112
- load_btn = gr.Button("πŸ“‚ Carica Chat")
113
- new_chat_btn = gr.Button("πŸ†• Nuova Chat")
114
- save_name = gr.Textbox(label="πŸ’Ύ Nome", placeholder="Es. brainstorming")
115
- save_btn = gr.Button("πŸ’Ύ Salva Chat")
116
-
117
- # Area centrale
118
- with gr.Column(scale=4):
119
- with gr.Row():
120
- reasoning_dropdown = gr.Dropdown(
121
- choices=grouped_models["reasoning"],
122
- label="🧠 Modelli con Ragionamento",
123
- interactive=True
124
- )
125
- casual_dropdown = gr.Dropdown(
126
- choices=grouped_models["casual"],
127
- label="⚑️ Modelli Generici",
128
- interactive=True
129
- )
130
-
131
- output_box = gr.Textbox(
132
- label="Conversazione",
133
- interactive=False,
134
- lines=20,
135
- max_lines=40
136
- )
137
-
138
- prompt_box = gr.Textbox(
139
- label="Prompt",
140
- placeholder="Scrivi qui il tuo messaggio...",
141
- lines=4,
142
- max_lines=10
143
- )
144
-
145
- send_btn = gr.Button("Invia", variant="primary")
146
- chat_history = gr.State([])
147
-
148
- # ------------------------------------------------------------------
149
- # Funzioni interazione
150
- # ------------------------------------------------------------------
151
-
152
- def resolve_model(prompt, reasoning_model, casual_model, history):
153
- model = reasoning_model or casual_model
154
- if not model:
155
- history.append({"role": "assistant", "content": "⚠️ Seleziona almeno un modello."})
156
- return history, format_history(history)
157
- return chat_with_openrouter(prompt, model, history)
158
-
159
- def reset_chat():
160
- return [], "", f"Chat #{os.urandom(2).hex()}", gr.update(value=None)
161
-
162
- def save_chat_fn(chats, name, history):
163
- if not name:
164
- return chats, gr.update(choices=list(chats.keys())), "⚠️ Inserisci un nome valido."
165
- chats[name] = history.copy()
166
- return chats, gr.update(choices=list(chats.keys())), f"βœ… Chat salvata: '{name}'"
167
-
168
- def load_chat_fn(chats, selected_name):
169
- if not selected_name or selected_name not in chats:
170
- return [], "", "⚠️ Seleziona una chat valida."
171
- history = chats[selected_name]
172
- return history, format_history(history), selected_name
173
-
174
- # ------------------------------------------------------------------
175
- # Eventi
176
- # ------------------------------------------------------------------
177
- send_btn.click(
178
- fn=resolve_model,
179
- inputs=[prompt_box, reasoning_dropdown, casual_dropdown, chat_history],
180
- outputs=[chat_history, output_box]
181
- )
182
-
183
- prompt_box.submit(
184
- fn=resolve_model,
185
- inputs=[prompt_box, reasoning_dropdown, casual_dropdown, chat_history],
186
- outputs=[chat_history, output_box]
187
- )
188
-
189
- new_chat_btn.click(
190
- fn=reset_chat,
191
- inputs=[],
192
- outputs=[chat_history, output_box, current_chat_name, chat_selector]
193
- )
194
-
195
- save_btn.click(
196
- fn=save_chat_fn,
197
- inputs=[saved_chats, save_name, chat_history],
198
- outputs=[saved_chats, chat_selector, output_box]
199
- )
200
-
201
- load_btn.click(
202
- fn=load_chat_fn,
203
- inputs=[saved_chats, chat_selector],
204
- outputs=[chat_history, output_box, current_chat_name]
205
- )
206
-
207
- return demo
208
-
209
- # ------------------------------------------------------------------
210
- # Avvio
211
- # ------------------------------------------------------------------
212
- if __name__ == "__main__":
213
- build_interface().launch()
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ OpenRouter Chatbot – con storico conversazione + modelli separati + Telegram Run: python app.py """
2
+
3
import asyncio
import functools
import os
import threading

import gradio as gr
import requests
from telegram import Update
from telegram.ext import (
    ApplicationBuilder,
    CommandHandler,
    ContextTypes,
    MessageHandler,
    filters,
)
6
+
7
# ------------------------------------------------------------------
# Configurazione
# ------------------------------------------------------------------
# SECURITY: credentials must come from the environment, never from
# hard-coded literals. The values previously embedded here (an
# OpenRouter API key and a Telegram bot token) were committed to
# version control and must be considered leaked — revoke/rotate them.
OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY", "")
TELEGRAM_BOT_TOKEN = os.getenv("TELEGRAM_BOT_TOKEN", "")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
 
15
# ------------------------------------------------------------------
# Ottieni modelli raggruppati
# ------------------------------------------------------------------
@functools.lru_cache(maxsize=1)
def fetch_models_grouped() -> dict:
    """Fetch the OpenRouter model catalogue and split it into two groups.

    Returns:
        dict with keys:
            "reasoning": sorted ids whose name matches a well-known
                strong model family (gpt, claude, llama, ...)
            "casual": sorted ids of everything else

    Falls back to a minimal static list if the API is unreachable, so
    the UI can still start. Cached with lru_cache so the network call
    happens at most once per process.
    """
    headers = {"Authorization": f"Bearer {OPENROUTER_API_KEY}"}
    # Substrings identifying the "reasoning" model families.
    reasoning_keys = (
        "gpt", "claude", "llama", "mistral",
        "mixtral", "gemini", "command-r", "qwen",
    )
    try:
        resp = requests.get(
            "https://openrouter.ai/api/v1/models", headers=headers, timeout=15
        )
        resp.raise_for_status()
        data = resp.json()

        reasoning_models = []
        casual_models = []
        for m in data["data"]:
            model_id = m["id"].lower()
            if any(key in model_id for key in reasoning_keys):
                reasoning_models.append(m["id"])
            else:
                casual_models.append(m["id"])

        return {
            "reasoning": sorted(reasoning_models),
            "casual": sorted(casual_models),
        }
    except Exception:
        # Network/parse failure: fall back to one known-good model id.
        return {
            "reasoning": ["openai/gpt-4-turbo"],
            "casual": [],
        }
42
 
43
# ------------------------------------------------------------------
# Format cronologia
# ------------------------------------------------------------------
def format_history(history: list) -> str:
    """Render a chat history as a plain-text transcript.

    Args:
        history: list of {"role": str, "content": str} message dicts.

    Returns:
        The transcript with user/assistant headers; messages whose role
        is neither "user" nor "assistant" are skipped. Trailing
        whitespace is stripped.
    """
    parts = []
    for msg in history:
        if msg["role"] == "user":
            parts.append(f"\U0001f9d1\u200d\U0001f4bb Tu:\n{msg['content']}\n\n")
        elif msg["role"] == "assistant":
            parts.append(f"\U0001f916 Assistente:\n{msg['content']}\n\n")
    # join() instead of repeated += keeps this linear in total length.
    return "".join(parts).strip()
50
+
51
# ------------------------------------------------------------------
# Chat
# ------------------------------------------------------------------
def chat_with_openrouter(prompt: str, selected_model: str, history: list):
    """Send *prompt* to the OpenRouter chat-completions API.

    Appends the user message and then either the assistant reply or an
    error message to *history* (mutated in place).

    Args:
        prompt: the new user message.
        selected_model: OpenRouter model id to use.
        history: running message list in OpenAI chat format.

    Returns:
        (history, formatted_transcript) — never raises; API failures are
        reported inside the transcript so the UI keeps working.
    """
    headers = {
        "Authorization": f"Bearer {OPENROUTER_API_KEY}",
        "Content-Type": "application/json",
    }

    history.append({"role": "user", "content": prompt})

    payload = {
        "model": selected_model,
        "messages": history,
        "max_tokens": 4096,
        "temperature": 0.7,
    }

    try:
        resp = requests.post(
            "https://openrouter.ai/api/v1/chat/completions",
            headers=headers,
            json=payload,
            timeout=60,
        )
        resp.raise_for_status()
        reply = resp.json()["choices"][0]["message"]["content"]
        history.append({"role": "assistant", "content": reply})
    except Exception as e:
        # Surface the failure in the transcript instead of crashing.
        history.append({"role": "assistant", "content": f"\u274c Errore: {e}"})
    return history, format_history(history)
78
+
79
# ------------------------------------------------------------------
# Interfaccia Gradio
# ------------------------------------------------------------------
def build_interface():
    """Build and return the Gradio Blocks app.

    Layout: a sidebar for saving/loading named chats, and a main column
    with two model dropdowns (reasoning vs generic), the transcript box
    and the prompt box. All conversation state lives in gr.State.
    """
    grouped_models = fetch_models_grouped()

    with gr.Blocks(title="NotExistChatter – Chat con storico") as demo:
        gr.Markdown("## \U0001f916 Project Adam – Chat dinamica con modelli open source")

        saved_chats = gr.State({})            # name -> saved history list
        current_chat_name = gr.State("Chat #1")

        with gr.Row():
            # Left sidebar: saved-chat management.
            with gr.Column(scale=1):
                gr.Markdown("### \U0001f4ac Chat salvate")
                chat_selector = gr.Radio(choices=[], label="Storico", interactive=True)
                load_btn = gr.Button("\U0001f4c2 Carica Chat")
                new_chat_btn = gr.Button("\U0001f195 Nuova Chat")
                save_name = gr.Textbox(label="\U0001f4be Nome", placeholder="Es. brainstorming")
                save_btn = gr.Button("\U0001f4be Salva Chat")

            # Main column: model pickers, transcript, prompt input.
            with gr.Column(scale=4):
                with gr.Row():
                    reasoning_dropdown = gr.Dropdown(
                        choices=grouped_models["reasoning"],
                        label="\U0001f9e0 Modelli con Ragionamento",
                        interactive=True,
                    )
                    casual_dropdown = gr.Dropdown(
                        choices=grouped_models["casual"],
                        label="\u26a1\ufe0f Modelli Generici",
                        interactive=True,
                    )

                output_box = gr.Textbox(
                    label="Conversazione", interactive=False, lines=20, max_lines=40
                )
                prompt_box = gr.Textbox(
                    label="Prompt",
                    placeholder="Scrivi qui il tuo messaggio...",
                    lines=4,
                    max_lines=10,
                )
                send_btn = gr.Button("Invia", variant="primary")
                chat_history = gr.State([])

        def resolve_model(prompt, reasoning_model, casual_model, history):
            # Reasoning model wins if both dropdowns have a selection.
            model = reasoning_model or casual_model
            if not model:
                history.append({"role": "assistant", "content": "\u26a0\ufe0f Seleziona almeno un modello."})
                return history, format_history(history)
            return chat_with_openrouter(prompt, model, history)

        def reset_chat():
            # Random hex suffix keeps auto-generated chat names unique.
            return [], "", f"Chat #{os.urandom(2).hex()}", gr.update(value=None)

        def save_chat_fn(chats, name, history):
            if not name:
                return chats, gr.update(choices=list(chats.keys())), "\u26a0\ufe0f Inserisci un nome valido."
            chats[name] = history.copy()
            return chats, gr.update(choices=list(chats.keys())), f"\u2705 Chat salvata: '{name}'"

        def load_chat_fn(chats, selected_name):
            if not selected_name or selected_name not in chats:
                return [], "", "\u26a0\ufe0f Seleziona una chat valida."
            history = chats[selected_name]
            return history, format_history(history), selected_name

        send_btn.click(
            fn=resolve_model,
            inputs=[prompt_box, reasoning_dropdown, casual_dropdown, chat_history],
            outputs=[chat_history, output_box],
        )
        prompt_box.submit(
            fn=resolve_model,
            inputs=[prompt_box, reasoning_dropdown, casual_dropdown, chat_history],
            outputs=[chat_history, output_box],
        )
        new_chat_btn.click(
            fn=reset_chat,
            inputs=[],
            outputs=[chat_history, output_box, current_chat_name, chat_selector],
        )
        save_btn.click(
            fn=save_chat_fn,
            inputs=[saved_chats, save_name, chat_history],
            outputs=[saved_chats, chat_selector, output_box],
        )
        load_btn.click(
            fn=load_chat_fn,
            inputs=[saved_chats, chat_selector],
            outputs=[chat_history, output_box, current_chat_name],
        )

    return demo
141
+
142
# ------------------------------------------------------------------
# Bot Telegram
# ------------------------------------------------------------------
# Per-chat bot state: chat_id -> {"history": [...], "model": model_id}
telegram_chat_states = {}

async def start(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Handle /start: (re)initialise this chat with the default model."""
    chat_id = update.effective_chat.id
    telegram_chat_states[chat_id] = {
        "history": [],
        # fetch_models_grouped() always has at least one reasoning model
        # thanks to its static fallback.
        "model": fetch_models_grouped()["reasoning"][0],
    }
    await update.message.reply_text(
        "\U0001f916 Benvenuto! Scrivimi un messaggio per iniziare a chattare."
    )
151
+
152
async def setmodel(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Handle /setmodel <model-id>: choose the OpenRouter model per chat."""
    chat_id = update.effective_chat.id
    args = context.args
    available_models = fetch_models_grouped()
    all_models = available_models["reasoning"] + available_models["casual"]

    if not args:
        await update.message.reply_text(
            "\U0001f4cc Specifica il modello. Es: /setmodel openai/gpt-4-turbo"
        )
        return

    selected_model = args[0]
    if selected_model not in all_models:
        await update.message.reply_text(
            "❌ Modello non valido. Scegli tra:\n" + "\n".join(all_models)
        )
        return

    # Create the per-chat state on first use, then record the chosen model.
    telegram_chat_states.setdefault(chat_id, {"history": [], "model": selected_model})
    telegram_chat_states[chat_id]["model"] = selected_model
    await update.message.reply_text(f"\u2705 Modello impostato: {selected_model}")
166
+
167
async def handle_message(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Relay a plain-text Telegram message to OpenRouter and reply."""
    chat_id = update.effective_chat.id
    user_input = update.message.text

    # Lazily create state for chats that never issued /start.
    if chat_id not in telegram_chat_states:
        telegram_chat_states[chat_id] = {
            "history": [],
            "model": fetch_models_grouped()["reasoning"][0],
        }

    state = telegram_chat_states[chat_id]
    history, _ = chat_with_openrouter(user_input, state["model"], state["history"])
    telegram_chat_states[chat_id]["history"] = history
    # The last entry is either the assistant reply or an error message.
    await update.message.reply_text(history[-1]["content"])
177
+
178
def run_telegram_bot():
    """Start the Telegram bot with long polling (blocking call).

    This function is launched in a background daemon thread (see the
    __main__ guard), so it must set up its own asyncio event loop and
    must not install signal handlers.
    """
    # NOTE(review): a non-main thread has no running asyncio event loop;
    # create one explicitly so run_polling() can use it — confirm against
    # the installed python-telegram-bot version.
    asyncio.set_event_loop(asyncio.new_event_loop())
    app = ApplicationBuilder().token(TELEGRAM_BOT_TOKEN).build()
    app.add_handler(CommandHandler("start", start))
    app.add_handler(CommandHandler("setmodel", setmodel))
    app.add_handler(MessageHandler(filters.TEXT & ~filters.COMMAND, handle_message))
    print("\U0001f916 Telegram bot in esecuzione...")
    # stop_signals=None: signal handlers can only be installed in the
    # main thread, which is owned by the Gradio UI here.
    app.run_polling(stop_signals=None)
179
+
180
# ------------------------------------------------------------------
# Avvio
# ------------------------------------------------------------------
if __name__ == "__main__":
    # The Telegram bot runs in a background daemon thread; Gradio keeps
    # the main thread (launch() blocks).
    threading.Thread(target=run_telegram_bot, daemon=True).start()
    build_interface().launch()
187
+