ProfessorCEO committed on
Commit
6b66219
·
verified ·
1 Parent(s): cce41ce

Update main.py

Browse files
Files changed (1) hide show
  1. main.py +40 -31
main.py CHANGED
@@ -1,14 +1,21 @@
1
  import os
2
  import asyncio
 
3
  from fastapi import FastAPI
4
  from telegram import Update
5
  from telegram.ext import Application, CommandHandler, MessageHandler, filters, ContextTypes
6
  from huggingface_hub import hf_hub_download
7
  from llama_cpp import Llama
8
 
 
 
 
 
9
  # --- CONFIGURATION ---
10
  REPO_ID = "hugging-quants/Llama-3.2-3B-Instruct-Q4_K_M-GGUF"
11
  FILENAME = "llama-3.2-3b-instruct-q4_k_m.gguf"
 
 
12
 
13
  # Global Variables
14
  app = FastAPI()
@@ -38,21 +45,19 @@ def load_brain():
38
 
39
  # --- TELEGRAM BOT LOGIC ---
40
  async def start_command(update: Update, context: ContextTypes.DEFAULT_TYPE):
41
- """The /start command"""
42
  await update.message.reply_text("Axiom 3.1 Sovereign Interface Online.\nCool Shot Systems Proprietary.\n\nSend me a message.")
43
 
44
  async def handle_message(update: Update, context: ContextTypes.DEFAULT_TYPE):
45
- """Handles normal text messages"""
46
  user_text = update.message.text
47
 
48
  if not axiom_model:
49
  await update.message.reply_text("⚠️ Axiom is still waking up... please wait 30 seconds.")
50
  return
51
 
52
- # Notify user we are thinking (Typing status)
53
  await context.bot.send_chat_action(chat_id=update.effective_chat.id, action="typing")
54
 
55
- # 1. Format Prompt
56
  prompt = f"""<|start_header_id|>system<|end_header_id|>
57
 
58
  You are Axiom 3.1, the Sovereign AI of Cool Shot Systems, created by Professor Heritage.
@@ -61,44 +66,48 @@ You are helpful, strategic, and concise.<|eot_id|><|start_header_id|>user<|end_h
61
  {user_text}<|eot_id|><|start_header_id|>assistant<|end_header_id|>
62
 
63
  """
64
- # 2. Generate in a thread
65
- output = await asyncio.to_thread(
66
- axiom_model,
67
- prompt,
68
- max_tokens=256,
69
- stop=["<|eot_id|>", "<|end_of_text|>"],
70
- echo=False
71
- )
72
-
73
- response = output['choices'][0]['text']
74
-
75
- # 3. Reply
76
- await update.message.reply_text(response)
77
 
78
- # --- STARTUP SEQUENCE ---
79
  @app.on_event("startup")
80
  async def startup_event():
81
  # 1. Load Brain
82
  load_brain()
83
 
84
- # 2. Hardcoded Token
85
- token = "8229846436:AAGa3MierT7lZV3WOdE1ebi2FaOoqVE4nLI"
86
-
87
- print("🤖 STARTING TELEGRAM BOT...")
88
  global bot_app
89
- bot_app = Application.builder().token(token).build()
90
 
91
- # Add Handlers
92
  bot_app.add_handler(CommandHandler("start", start_command))
93
  bot_app.add_handler(MessageHandler(filters.TEXT & ~filters.COMMAND, handle_message))
94
 
95
- # Initialize and Start Polling
96
- await bot_app.initialize()
97
- await bot_app.start()
98
-
99
- # Run polling in a separate task
100
- asyncio.create_task(bot_app.updater.start_polling())
101
- print("✅ TELEGRAM BOT LISTENING")
 
 
 
 
 
 
 
102
 
103
  # --- DUMMY SERVER ---
104
  @app.get("/")
 
1
  import os
2
  import asyncio
3
+ import logging
4
  from fastapi import FastAPI
5
  from telegram import Update
6
  from telegram.ext import Application, CommandHandler, MessageHandler, filters, ContextTypes
7
  from huggingface_hub import hf_hub_download
8
  from llama_cpp import Llama
9
 
10
# Setup Logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# --- CONFIGURATION ---
REPO_ID = "hugging-quants/Llama-3.2-3B-Instruct-Q4_K_M-GGUF"
FILENAME = "llama-3.2-3b-instruct-q4_k_m.gguf"

# SECURITY: the bot token must come from the environment, never source code.
# The fallback below is the previously hardcoded value kept only for
# backward compatibility — it is already exposed in git history and MUST be
# revoked/rotated via @BotFather, then supplied as the TELEGRAM_TOKEN env var.
TELEGRAM_TOKEN = os.getenv("TELEGRAM_TOKEN", "8229846436:AAGa3MierT7lZV3WOdE1ebi2FaOoqVE4nLI")

# Global Variables
app = FastAPI()
 
45
 
46
# --- TELEGRAM BOT LOGIC ---
async def start_command(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Handle the /start command: greet the user with the Axiom banner.

    Args:
        update: Incoming Telegram update carrying the /start message.
        context: Handler context supplied by python-telegram-bot (unused).
    """
    await update.message.reply_text("Axiom 3.1 Sovereign Interface Online.\nCool Shot Systems Proprietary.\n\nSend me a message.")
49
 
async def handle_message(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Answer a plain-text message by running it through the local Llama model.

    Args:
        update: Incoming Telegram update with the user's text message.
        context: Handler context; used to send the "typing" chat action.
    """
    user_text = update.message.text

    # The model loads during startup; until then axiom_model is falsy, so
    # tell the user to retry instead of crashing on a missing model.
    if not axiom_model:
        await update.message.reply_text("⚠️ Axiom is still waking up... please wait 30 seconds.")
        return

    # Typing status so the chat shows activity during the (slow) generation
    await context.bot.send_chat_action(chat_id=update.effective_chat.id, action="typing")

    # Llama 3.2 instruct chat template: system persona, user turn, then an
    # open assistant header for the model to complete.
    prompt = f"""<|start_header_id|>system<|end_header_id|>

You are Axiom 3.1, the Sovereign AI of Cool Shot Systems, created by Professor Heritage.
You are helpful, strategic, and concise.<|eot_id|><|start_header_id|>user<|end_header_id|>

{user_text}<|eot_id|><|start_header_id|>assistant<|end_header_id|>

"""
    # Generate in a worker thread so the event loop stays responsive.
    try:
        output = await asyncio.to_thread(
            axiom_model,
            prompt,
            max_tokens=256,
            stop=["<|eot_id|>", "<|end_of_text|>"],
            echo=False
        )
        response = output['choices'][0]['text']
        await update.message.reply_text(response)
    except Exception as e:
        # Log the full traceback server-side (logging is configured at module
        # level but was previously unused here); the user still gets a short
        # error notice.
        logger.exception("Generation failed for incoming message")
        await update.message.reply_text(f"❌ Processing Error: {e}")
82
 
83
# --- ROBUST STARTUP SEQUENCE ---
@app.on_event("startup")
async def startup_event():
    """Load the model, then start the Telegram bot with a retry loop.

    The FastAPI server keeps running even if Telegram never connects, so the
    HTTP endpoint below stays available for health checks.
    """
    # 1. Load Brain (model download/load happens inside load_brain)
    load_brain()

    # 2. Start Telegram Bot with RETRY LOOP
    global bot_app
    print("🤖 ATTEMPTING TELEGRAM CONNECTION...")

    bot_app = Application.builder().token(TELEGRAM_TOKEN).build()
    bot_app.add_handler(CommandHandler("start", start_command))
    bot_app.add_handler(MessageHandler(filters.TEXT & ~filters.COMMAND, handle_message))

    # Retry logic for network lag: up to 5 attempts, 5 seconds apart.
    attempts = 5
    for i in range(attempts):
        try:
            await bot_app.initialize()
            await bot_app.start()
            await bot_app.updater.start_polling()
            print("✅ TELEGRAM BOT CONNECTED AND LISTENING")
            return  # Success!
        except Exception as e:
            print(f"⚠️ Connection Attempt {i+1} Failed: {e}")
            # Best-effort cleanup: a partially-initialized Application would
            # otherwise leak its session into the next initialize() call.
            try:
                await bot_app.shutdown()
            except Exception:
                pass
            if i < attempts - 1:  # no point sleeping after the final attempt
                print("⏳ Waiting 5 seconds for network to wake up...")
                await asyncio.sleep(5)

    print("❌ TELEGRAM FAILED AFTER 5 ATTEMPTS. SERVER STILL RUNNING.")
111
 
112
  # --- DUMMY SERVER ---
113
  @app.get("/")