Hugging Face Spaces — build status: Build error
Commit: "Update main.py" (Browse files)
File: main.py — CHANGED
|
@@ -7,7 +7,6 @@ from huggingface_hub import hf_hub_download
|
|
| 7 |
from llama_cpp import Llama
|
| 8 |
|
| 9 |
# --- CONFIGURATION ---
|
| 10 |
-
# We use the FAST model (Llama 3.2 3B) for Telegram speed
|
| 11 |
REPO_ID = "hugging-quants/Llama-3.2-3B-Instruct-Q4_K_M-GGUF"
|
| 12 |
FILENAME = "llama-3.2-3b-instruct-q4_k_m.gguf"
|
| 13 |
|
|
@@ -29,7 +28,7 @@ def load_brain():
|
|
| 29 |
print("🧠 LOADING AXIOM INTO RAM...")
|
| 30 |
axiom_model = Llama(
|
| 31 |
model_path=model_path,
|
| 32 |
-
n_ctx=2048,
|
| 33 |
n_threads=2,
|
| 34 |
verbose=False
|
| 35 |
)
|
|
@@ -53,7 +52,7 @@ async def handle_message(update: Update, context: ContextTypes.DEFAULT_TYPE):
|
|
| 53 |
# Notify user we are thinking (Typing status)
|
| 54 |
await context.bot.send_chat_action(chat_id=update.effective_chat.id, action="typing")
|
| 55 |
|
| 56 |
-
# 1. Format Prompt
|
| 57 |
prompt = f"""<|start_header_id|>system<|end_header_id|>
|
| 58 |
|
| 59 |
You are Axiom 3.1, the Sovereign AI of Cool Shot Systems, created by Professor Heritage.
|
|
@@ -62,8 +61,7 @@ You are helpful, strategic, and concise.<|eot_id|><|start_header_id|>user<|end_h
|
|
| 62 |
{user_text}<|eot_id|><|start_header_id|>assistant<|end_header_id|>
|
| 63 |
|
| 64 |
"""
|
| 65 |
-
# 2. Generate
|
| 66 |
-
# We run this in a thread so we don't block the bot
|
| 67 |
output = await asyncio.to_thread(
|
| 68 |
axiom_model,
|
| 69 |
prompt,
|
|
@@ -83,12 +81,8 @@ async def startup_event():
|
|
| 83 |
# 1. Load Brain
|
| 84 |
load_brain()
|
| 85 |
|
| 86 |
-
# 2.
|
| 87 |
-
|
| 88 |
-
token = "8229846436:AAGa3MierT7lZV3WOdE1ebi2FaOoqVE4nLI"
|
| 89 |
-
if not token:
|
| 90 |
-
print("❌ NO TELEGRAM TOKEN FOUND!")
|
| 91 |
-
return
|
| 92 |
|
| 93 |
print("🤖 STARTING TELEGRAM BOT...")
|
| 94 |
global bot_app
|
|
@@ -102,11 +96,11 @@ token = "8229846436:AAGa3MierT7lZV3WOdE1ebi2FaOoqVE4nLI"
|
|
| 102 |
await bot_app.initialize()
|
| 103 |
await bot_app.start()
|
| 104 |
|
| 105 |
-
# Run polling in a separate
|
| 106 |
asyncio.create_task(bot_app.updater.start_polling())
|
| 107 |
print("✅ TELEGRAM BOT LISTENING")
|
| 108 |
|
| 109 |
-
# --- DUMMY SERVER
|
| 110 |
@app.get("/")
def home():
    """Liveness endpoint: confirms the bot process is up and serving HTTP."""
    payload = {"status": "Axiom Telegram Bot Running"}
    return payload
|
|
|
|
| 7 |
from llama_cpp import Llama
|
| 8 |
|
| 9 |
# --- CONFIGURATION ---
|
|
|
|
| 10 |
REPO_ID = "hugging-quants/Llama-3.2-3B-Instruct-Q4_K_M-GGUF"
|
| 11 |
FILENAME = "llama-3.2-3b-instruct-q4_k_m.gguf"
|
| 12 |
|
|
|
|
| 28 |
print("🧠 LOADING AXIOM INTO RAM...")
|
| 29 |
axiom_model = Llama(
|
| 30 |
model_path=model_path,
|
| 31 |
+
n_ctx=2048,
|
| 32 |
n_threads=2,
|
| 33 |
verbose=False
|
| 34 |
)
|
|
|
|
| 52 |
# Notify user we are thinking (Typing status)
|
| 53 |
await context.bot.send_chat_action(chat_id=update.effective_chat.id, action="typing")
|
| 54 |
|
| 55 |
+
# 1. Format Prompt
|
| 56 |
prompt = f"""<|start_header_id|>system<|end_header_id|>
|
| 57 |
|
| 58 |
You are Axiom 3.1, the Sovereign AI of Cool Shot Systems, created by Professor Heritage.
|
|
|
|
| 61 |
{user_text}<|eot_id|><|start_header_id|>assistant<|end_header_id|>
|
| 62 |
|
| 63 |
"""
|
| 64 |
+
# 2. Generate in a thread
|
|
|
|
| 65 |
output = await asyncio.to_thread(
|
| 66 |
axiom_model,
|
| 67 |
prompt,
|
|
|
|
| 81 |
# 1. Load Brain
|
| 82 |
load_brain()
|
| 83 |
|
| 84 |
+
# 2. Hardcoded Token — SECURITY(review): this Telegram bot token is committed in plaintext; it must be revoked and loaded from an environment variable instead
|
| 85 |
+
token = "8229846436:AAGa3MierT7lZV3WOdE1ebi2FaOoqVE4nLI"
|
|
|
|
|
|
|
|
|
|
|
|
|
| 86 |
|
| 87 |
print("🤖 STARTING TELEGRAM BOT...")
|
| 88 |
global bot_app
|
|
|
|
| 96 |
await bot_app.initialize()
|
| 97 |
await bot_app.start()
|
| 98 |
|
| 99 |
+
# Run polling in a separate task
|
| 100 |
asyncio.create_task(bot_app.updater.start_polling())
|
| 101 |
print("✅ TELEGRAM BOT LISTENING")
|
| 102 |
|
| 103 |
+
# --- DUMMY SERVER ---
|
| 104 |
@app.get("/")
def home():
    """Liveness endpoint: confirms the bot process is up and serving HTTP."""
    payload = {"status": "Axiom Telegram Bot Running"}
    return payload
|