OrbitMC committed on
Commit
ad249f5
·
verified ·
1 Parent(s): a055356

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -21
app.py CHANGED
@@ -5,9 +5,11 @@ import base64
5
  import datetime
6
  import traceback
7
  import asyncio
 
8
  from flask import Flask, request, jsonify
9
  from num2words import num2words
10
- from transformers import pipeline
 
11
 
12
  # ══════════════════════════════════════════
13
  # CONFIG
@@ -42,7 +44,7 @@ def clean_text_for_tts(text):
42
  return text
43
 
44
  # ══════════════════════════════════════════
45
- # LOAD UNSLOTH GGUF & EDGE-TTS
46
  # ══════════════════════════════════════════
47
  print("=" * 55)
48
  print(" J.A.R.V.I.S. — Booting Systems")
@@ -51,20 +53,23 @@ print("=" * 55)
51
  LLM_ID = "unsloth/LFM2.5-1.2B-Instruct-GGUF"
52
  GGUF_FILE = "LFM2.5-1.2B-Instruct-UD-Q8_K_XL.gguf"
53
 
54
- print(f"[1/2] Loading {GGUF_FILE} via pipeline...")
55
  try:
56
- # Explicitly load the exact GGUF file requested
57
- pipe = pipeline(
58
- "text-generation",
59
- model=LLM_ID,
60
- device_map="cpu",
61
- model_kwargs={"gguf_file": GGUF_FILE}
 
 
 
62
  )
63
- print(f" ✅ {LLM_ID} loaded with {GGUF_FILE}!")
64
  except Exception as e:
65
  print(f" ❌ Model FAILED completely: {e}")
66
  traceback.print_exc()
67
- raise SystemExit("Cannot start without LLM. Check HF_TOKEN and GGUF compatibility.")
68
 
69
  print("[2/2] Loading edge-tts...")
70
  try:
@@ -75,7 +80,7 @@ except ImportError as e:
75
  edge_tts = None
76
 
77
  print("=" * 55)
78
- print(f" LLM : {GGUF_FILE}")
79
  print(f" TTS : edge-tts ({'READY' if edge_tts else 'DISABLED'})")
80
  print(f" Voice: {TTS_VOICE} | Rate: +7% | Pitch: +20Hz")
81
  print(f" Max tokens: {MAX_NEW_TOKENS}")
@@ -107,7 +112,7 @@ def add_to_memory(sid, role, content):
107
  def generate_response(user_input, session_id):
108
  memory = get_memory(session_id)
109
 
110
- # Build chat messages
111
  messages =[
112
  {"role": "system", "content": SYSTEM_PROMPT},
113
  {"role": "assistant", "content": "I am waiting for you!"},
@@ -122,18 +127,17 @@ def generate_response(user_input, session_id):
122
  # Current user message
123
  messages.append({"role": "user", "content": user_input})
124
 
125
- # Generate via pipeline
126
- outputs = pipe(
127
- messages,
128
- max_new_tokens=MAX_NEW_TOKENS,
129
- do_sample=True,
130
  temperature=0.9,
131
  top_k=45,
132
  top_p=0.97,
133
  )
134
 
135
  # Extract the assistant's newly generated text
136
- response = outputs[0]["generated_text"][-1]["content"].strip()
137
 
138
  if not response or len(response) < 2:
139
  response = "I appear to have momentarily lost my train of thought. Could you rephrase that?"
@@ -147,7 +151,7 @@ def generate_response(user_input, session_id):
147
  # ══════════════════════════════════════════
148
  async def _synthesize_edge(text, voice):
149
  # Applied specific settings from your image UI: rate +7%, pitch +20Hz
150
- communicate = edge_tts.Communicate(text, voice, rate="+9%", pitch="+15Hz")
151
  audio_data = b""
152
  async for chunk in communicate.stream():
153
  if chunk["type"] == "audio":
@@ -584,7 +588,7 @@ def clear():
584
  def health():
585
  return jsonify({
586
  "status": "online",
587
- "llm": "unsloth/LFM2.5-1.2B-Instruct-GGUF",
588
  "tts_mode": "edge-tts",
589
  "tts_model": "edge-tts" if edge_tts else "DISABLED",
590
  "tts_voice": TTS_VOICE,
 
5
  import datetime
6
  import traceback
7
  import asyncio
8
+ import multiprocessing
9
  from flask import Flask, request, jsonify
10
  from num2words import num2words
11
+ from huggingface_hub import hf_hub_download
12
+ from llama_cpp import Llama
13
 
14
  # ══════════════════════════════════════════
15
  # CONFIG
 
44
  return text
45
 
46
  # ══════════════════════════════════════════
47
+ # LOAD UNSLOTH GGUF (via llama.cpp) & EDGE-TTS
48
  # ══════════════════════════════════════════
49
  print("=" * 55)
50
  print(" J.A.R.V.I.S. — Booting Systems")
 
53
  LLM_ID = "unsloth/LFM2.5-1.2B-Instruct-GGUF"
54
  GGUF_FILE = "LFM2.5-1.2B-Instruct-UD-Q8_K_XL.gguf"
55
 
56
+ print(f"[1/2] Downloading & Loading {GGUF_FILE} via llama.cpp...")
57
  try:
58
+ # Auto-downloads the specific GGUF file from the repo and caches it
59
+ model_path = hf_hub_download(repo_id=LLM_ID, filename=GGUF_FILE)
60
+
61
+ # Initialize Llama.cpp engine
62
+ llm = Llama(
63
+ model_path=model_path,
64
+ n_ctx=4096, # Context window
65
+ n_threads=multiprocessing.cpu_count(),# Maximize CPU usage
66
+ verbose=False # Disable spammy C++ logs
67
  )
68
+ print(f" ✅ {GGUF_FILE} loaded successfully!")
69
  except Exception as e:
70
  print(f" ❌ Model FAILED completely: {e}")
71
  traceback.print_exc()
72
+ raise SystemExit("Cannot start without LLM.")
73
 
74
  print("[2/2] Loading edge-tts...")
75
  try:
 
80
  edge_tts = None
81
 
82
  print("=" * 55)
83
+ print(f" LLM : {GGUF_FILE} (llama.cpp)")
84
  print(f" TTS : edge-tts ({'READY' if edge_tts else 'DISABLED'})")
85
  print(f" Voice: {TTS_VOICE} | Rate: +7% | Pitch: +20Hz")
86
  print(f" Max tokens: {MAX_NEW_TOKENS}")
 
112
  def generate_response(user_input, session_id):
113
  memory = get_memory(session_id)
114
 
115
+ # Build chat messages for llama.cpp chat templates
116
  messages =[
117
  {"role": "system", "content": SYSTEM_PROMPT},
118
  {"role": "assistant", "content": "I am waiting for you!"},
 
127
  # Current user message
128
  messages.append({"role": "user", "content": user_input})
129
 
130
+ # Generate via llama-cpp-python
131
+ output = llm.create_chat_completion(
132
+ messages=messages,
133
+ max_tokens=MAX_NEW_TOKENS,
 
134
  temperature=0.9,
135
  top_k=45,
136
  top_p=0.97,
137
  )
138
 
139
  # Extract the assistant's newly generated text
140
+ response = output['choices'][0]['message']['content'].strip()
141
 
142
  if not response or len(response) < 2:
143
  response = "I appear to have momentarily lost my train of thought. Could you rephrase that?"
 
151
  # ══════════════════════════════════════════
152
  async def _synthesize_edge(text, voice):
153
  # Applied specific settings from your image UI: rate +7%, pitch +20Hz
154
+ communicate = edge_tts.Communicate(text, voice, rate="+7%", pitch="+20Hz")
155
  audio_data = b""
156
  async for chunk in communicate.stream():
157
  if chunk["type"] == "audio":
 
588
  def health():
589
  return jsonify({
590
  "status": "online",
591
+ "llm": "unsloth/LFM2.5-1.2B-Instruct-GGUF (llama.cpp)",
592
  "tts_mode": "edge-tts",
593
  "tts_model": "edge-tts" if edge_tts else "DISABLED",
594
  "tts_voice": TTS_VOICE,