Create app.py
app.py
ADDED
@@ -0,0 +1,162 @@
import gradio as gr
from llama_cpp import Llama
from huggingface_hub import hf_hub_download
import os
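
# Dependencies assumed for this Space (e.g. pinned in requirements.txt):
# gradio, llama-cpp-python, huggingface_hub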

# ==========================================
# 1. CONFIGURATION
# ==========================================
# REPLACE with your actual Hugging Face Repo ID and Filename
MODEL_REPO = "simran40/BBSBEC-GGUF"
MODEL_FILE = "bbsbec_model.q4_k_m.gguf"

print("⏳ System Startup: Checking Model...")
llm = None

try:
    # Check if the model exists locally in the cache, or download it
    model_path = hf_hub_download(
        repo_id=MODEL_REPO,
        filename=MODEL_FILE,
        cache_dir="./model_cache"
    )
    print(f"✅ Model Found: {model_path}")

    # Initialize the llama.cpp inference engine
    llm = Llama(
        model_path=model_path,
        n_ctx=2048,    # Context window size
        n_threads=2,   # CPU threads
        n_batch=512,
        verbose=False
    )
    print("✅ Inference Engine Ready")

except Exception as e:
    print(f"❌ Load Error: {e}")
    print("⚠️ App starting in Safe Mode (Chat disabled).")
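
# Optional sanity check (a sketch, not part of the app flow): one non-streamed
# completion to confirm the engine responds before the UI starts.
# if llm is not None:
#     out = llm("### Instruction:\nSay hello.\n\n### Response:\n", max_tokens=16)
#     print(out["choices"][0]["text"])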


# ==========================================
# 2. PROMPT ENGINEERING (MATCHING TRAINING PIPELINE)
# ==========================================

# CRITICAL: This MUST match the System Prompt used in Cells 8 & 12 of your training notebook.
SYSTEM_IDENTITY = """You are the official AI Assistant for BABA BANDA SINGH BAHADUR ENGINEERING COLLEGE, FATEHGARH SAHIB.
Your role is to answer questions about B.Tech, M.Tech, BCA, MBA, exams, hostels, placements, and campus facilities.
You are helpful, polite, and strictly factual.
You are NOT a human. You do not have feelings."""

def format_prompt(history, message):
    """
    Constructs the prompt exactly as the model was fine-tuned.
    Format: Alpaca-Style
    """
    prompt_context = ""

    # Include recent chat history to allow follow-up questions
    if history:
        for turn in history[-2:]:  # Keep last 2 turns
            if isinstance(turn, (list, tuple)) and len(turn) >= 2:
                user_msg = turn[0]
                bot_msg = turn[1]
                prompt_context += f"User: {str(user_msg)}\nAssistant: {str(bot_msg)}\n"
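            # Defensive branch (an assumption; the Chatbot below uses pairs):
            # Gradio's type="messages" mode passes history turns as
            # {"role": ..., "content": ...} dicts, so fold those in as well.
            elif isinstance(turn, dict) and "content" in turn:
                role = "User" if turn.get("role") == "user" else "Assistant"
                prompt_context += f"{role}: {turn['content']}\n"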

    # The exact prompt structure needed for your fine-tuned Llama-3 model
    full_prompt = (
        f"### Instruction:\n"
        f"{SYSTEM_IDENTITY}\n\n"

        f"### Previous Context:\n"
        f"{prompt_context}\n"

        f"### Current User Question:\n"
        f"{message}\n\n"

        f"### Response:\n"
    )
    return full_prompt

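# For illustration (a hypothetical exchange), format_prompt([("Do you offer BCA?",
# "Yes, BBSBEC offers BCA.")], "What is the fee?") produces:
#
#   ### Instruction:
#   You are the official AI Assistant for BABA BANDA SINGH BAHADUR ENGINEERING COLLEGE, FATEHGARH SAHIB.
#   ...
#
#   ### Previous Context:
#   User: Do you offer BCA?
#   Assistant: Yes, BBSBEC offers BCA.
#
#   ### Current User Question:
#   What is the fee?
#
#   ### Response:
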
def chat_with_bot(message, history):
    # --- Safety Check ---
    if llm is None:
        yield "⚠️ **System Error:** Model not found. Please check MODEL_REPO in the code."
        return

    # --- Generate Response ---
    prompt = format_prompt(history, message)

    try:
        stream = llm(
            prompt,
            max_tokens=256,
            temperature=0.1,     # Low temp for factual accuracy (as tested in Cell 12)
            top_p=0.9,
            repeat_penalty=1.2,  # Higher penalty to prevent loops
            stop=["###", "User:", "Assistant:", "<|end_of_text|>"],
            stream=True
        )
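
        # The stop strings mirror the "###" section markers of the Alpaca-style
        # prompt, so generation halts before the model can invent a new
        # "### Instruction" block or a fake "User:" turn.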

        response = ""
        for chunk in stream:
            text = chunk["choices"][0]["text"]
            response += text
            yield response

    except Exception as e:
        yield f"Error: {str(e)}"


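# Gradio treats a generator handler as a streaming response: each yielded value
# replaces the message shown so far, which is why `response` accumulates.
# Quick console check (a sketch, bypassing the UI):
#   reply = ""
#   for reply in chat_with_bot("Do you offer BCA?", []):
#       pass
#   print(reply)  # final accumulated answer
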
# ==========================================
# 3. USER INTERFACE (BBSBEC BRANDING)
# ==========================================

custom_css = ".gradio-container {max-width: 800px; margin: auto;}"

with gr.Blocks(theme=gr.themes.Soft(), css=custom_css, title="BBSBEC AI Assistant") as demo:
    gr.Markdown(
        """
        # 🏫 BBSBEC Fatehgarh Sahib Assistant

        I am the official AI for **Baba Banda Singh Bahadur Engineering College**.
        Managed by **SGPC**. Affiliated with **IKGPTU**.

        **Ask me about:**
        * 🎓 B.Tech, M.Tech, BCA, MBA Admissions
        * 💰 Fees & Scholarships
        * 🏨 Hostels (Baba Ajit Singh, Mata Gujri, etc.)
        * 📝 Exams (MSTs, Results) & Placements
        """
    )

    chatbot = gr.ChatInterface(
        fn=chat_with_bot,
        chatbot=gr.Chatbot(height=450, show_label=False),
        textbox=gr.Textbox(
            placeholder="E.g., What is the fee for B.Tech CSE?",
            container=False,
            scale=7
        ),
        examples=[
            "What is the eligibility for B.Tech CSE?",
            "Tell me about the hostel facilities.",
            "Do you offer BCA?",
            "How far is the college from the railway station?",
            "Is the campus ragging-free?"
        ],
        cache_examples=False,
    )

    gr.Markdown(
        """
        <div style="text-align: center; font-size: 0.8em; color: gray;">
        BBSBEC AI Assistant • Powered by Llama-3.2-1B (Fine-Tuned)
        </div>
        """
    )

if __name__ == "__main__":
    demo.queue(max_size=5).launch(
        server_name="0.0.0.0",
        server_port=7860
    )