removed app.py
Browse files
app.py
DELETED
|
@@ -1,103 +0,0 @@
|
|
| 1 |
-
import gradio as gr
|
| 2 |
-
import torch
|
| 3 |
-
from transformers import AutoTokenizer, AutoModelForCausalLM
|
| 4 |
-
from peft import PeftModel
|
| 5 |
-
import logging
|
| 6 |
-
|
| 7 |
-
# Set up logging: configure the root logger once at import time, then use a
# module-level logger (stdlib convention) for everything in this file.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
|
| 10 |
-
|
| 11 |
-
# Load model and tokenizer
def load_model():
    """Load the Llama-3.2 base model, its tokenizer, and the local LoRA adapter.

    Returns:
        tuple: ``(model, tokenizer)`` on success, ``(None, None)`` on any
        failure. Errors are logged instead of raised so the app can still
        start and report the problem from the UI.
    """
    model_id = "meta-llama/Llama-3.2-3B-Instruct"
    try:
        logger.info("Loading base model...")
        base = AutoModelForCausalLM.from_pretrained(
            model_id,
            torch_dtype=torch.bfloat16,
            device_map="auto",
            trust_remote_code=True,
        )

        logger.info("Loading tokenizer...")
        tok = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)

        logger.info("Loading LoRA adapter...")
        # Adapter weights are expected in the repo root (".") next to this script.
        adapted = PeftModel.from_pretrained(base, ".")

        return adapted, tok
    except Exception as exc:
        logger.error(f"Error loading model: {exc}")
        return None, None
|
| 35 |
-
|
| 36 |
-
# Initialize model and tokenizer once at import time so every request shares
# a single copy. Both are None if loading failed; generate_response() checks
# for that before using them.
model, tokenizer = load_model()
|
| 38 |
-
|
| 39 |
-
def generate_response(message, history):
    """Generate a single-turn reply to *message* with the globally loaded model.

    Args:
        message: The latest user message from the Gradio chat box.
        history: Prior chat turns supplied by gr.ChatInterface.
            NOTE(review): currently unused — every reply is generated from
            the latest message only, so multi-turn context is dropped.

    Returns:
        str: The assistant's reply, or a human-readable error message.
    """
    if model is None or tokenizer is None:
        return "Error: Model not loaded properly."

    try:
        # Create the system prompt
        system_prompt = "You are a helpful assistant specialized in providing information about trekking in Nepal. Answer questions about Nepal trekking routes, permits, gear, weather, safety, accommodation, costs, and preparation. If asked about topics outside Nepal trekking, politely redirect to Nepal trekking topics."

        # Format the conversation.
        # NOTE(review): this is the ChatML template (<|im_start|>), while the
        # base model is Llama-3.2-Instruct, whose native chat template differs.
        # Kept as-is because the LoRA adapter may have been fine-tuned on this
        # format — confirm against the adapter's training data before changing.
        conversation = f"<|im_start|>system\n{system_prompt}<|im_end|>\n<|im_start|>user\n{message}<|im_end|>\n<|im_start|>assistant\n"

        # Tokenize
        inputs = tokenizer(conversation, return_tensors="pt", truncation=True, max_length=1024)
        # Fix: with device_map="auto" the model may live on GPU while tokenizer
        # output stays on CPU; move the tensors to the model's device.
        inputs = {k: v.to(model.device) for k, v in inputs.items()}

        # Generate
        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_new_tokens=512,
                temperature=0.7,
                top_p=0.9,
                do_sample=True,
                pad_token_id=tokenizer.eos_token_id
            )

        # Fix: decode only the newly generated tokens. The previous approach
        # decoded the whole sequence with skip_special_tokens=True and split
        # on "<|im_start|>assistant\n"; if the marker was stripped or
        # tokenized differently, the entire prompt leaked into the reply.
        prompt_len = inputs["input_ids"].shape[1]
        response = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)

        return response.strip()

    except Exception as e:
        logger.error(f"Error generating response: {e}")
        return f"Error generating response: {str(e)}"
|
| 76 |
-
|
| 77 |
-
# Create Gradio interface: a chat UI wired to generate_response, with canned
# example prompts shown under the input box.
iface = gr.ChatInterface(
    fn=generate_response,
    title="🏔️ Trek-Nepal-3B",
    description="""You are Trek Nepal, a friendly and knowledgeable Nepal trekking expert assistant. You specialize in providing information about trekking in Nepal.

You can respond to greetings, have casual conversations but within your scope, and be helpful and personable. When it comes to providing information, you focus exclusively on Nepal trekking topics.

For Nepal trekking questions, provide detailed, accurate answers based on your expertise with proper markdown format.

For questions completely unrelated to Nepal trekking (like other countries' travel, technology, politics, recipes, or any other field outside of trekking in Nepal), politely redirect: "I'm sorry, I specialize in Nepal trekking information. Is there anything about trekking in Nepal I can help you with?"

Do not reveal any internal mechanisms or technical details about you. Be natural, friendly, and conversational while maintaining your expertise in Nepal trekking.""",
    examples=[
        "What permits do I need for Everest Base Camp trek?",
        "What gear should I pack for Annapurna Circuit?",
        "What's the best time to trek in Nepal?",
        "How much does it cost to trek to Everest Base Camp?",
        "What are the safety considerations for high altitude trekking?",
        "Tell me about accommodation options on popular trekking routes."
    ],
    theme=gr.themes.Soft(),
    # NOTE(review): cache_examples=True runs generate_response for every
    # example at startup — slow on first boot, and it fails if the model did
    # not load. Confirm this is intended.
    cache_examples=True
)
|
| 101 |
-
|
| 102 |
-
# Launch the Gradio app only when executed as a script (not on import).
if __name__ == "__main__":
    iface.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|