# ski / app.py
# KlemGunn0519's picture
# Update app.py
# 7f5f3b3 verified
import subprocess
import sys
import os
# Install required packages directly in the app
def install(package):
    """Install *package* via pip into the current interpreter's environment."""
    # Invoke pip through the running interpreter so the package lands in
    # exactly the environment this app is executing in.
    cmd = [sys.executable, "-m", "pip", "install", package]
    subprocess.check_call(cmd)
print("Installing required packages...")
# Versions are pinned where reproducibility matters; install order is
# preserved from the original script (torch before peft/accelerate etc.).
_REQUIRED_PACKAGES = (
    "transformers==4.56.0",
    "torch==2.3.0",
    "peft==0.12.0",
    "accelerate==0.28.0",
    "bitsandbytes==0.43.0",
    "sentencepiece",
    "protobuf",
    "gradio==5.27.0",
)
for _pkg in _REQUIRED_PACKAGES:
    install(_pkg)
print("All packages installed. Loading model...")
# Now import after installation
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel
# Load base model
# Base checkpoint the fine-tuned LoRA adapter below was trained against.
base_model = "google/gemma-2-2b"
tokenizer = AutoTokenizer.from_pretrained(base_model, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    base_model,
    torch_dtype="auto",  # take the dtype recorded in the checkpoint config
    device_map="auto",  # let accelerate place weights on GPU or fall back to CPU
    trust_remote_code=True,  # NOTE(review): executes repo code from the Hub — acceptable only for trusted checkpoints
    use_cache=False  # NOTE(review): disables the KV cache, which slows generate(); presumably a training-time leftover — confirm
)
# Load your fine-tuned adapter
# Wraps the base model with the LoRA weights from the named Hub repo.
model = PeftModel.from_pretrained(model, "KlemGunn0519/Mighty_Mountain_Ski_Resort")
def respond(message, history):
    """Generate a chat reply for gr.ChatInterface.

    Args:
        message: The user's latest message text.
        history: Prior conversation turns supplied by Gradio (unused).

    Returns:
        The model's answer with the instruction-template scaffolding stripped.
    """
    # Prompt must match the instruction template used during fine-tuning.
    prompt = f"### Instruction\n{message}\n\n### Response"
    # BUGFIX: move inputs to the device the model actually landed on.
    # device_map="auto" falls back to CPU when no GPU is present, so the
    # previous hard-coded .to("cuda") crashed on CPU-only hardware.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=200,
        do_sample=True,
        temperature=0.7,
        top_p=0.9,
        pad_token_id=tokenizer.eos_token_id
    )
    # Decode the full sequence, then keep only the text after the final
    # "### Response" marker (the prompt is echoed back by generate()).
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response.split("### Response")[-1].strip()
# Create Gradio interface
# BUGFIX: the retry_btn / undo_btn / clear_btn keyword arguments were removed
# from gr.ChatInterface in Gradio 5.x; with the pinned gradio==5.27.0 they
# raised TypeError at startup, so they are dropped here (retry/undo/clear are
# built into the 5.x chat UI).
demo = gr.ChatInterface(
    fn=respond,
    title="🎿 Mighty Mountain Ski Resort Assistant",
    description="Ask about lift tickets, trail conditions, hours, and more!",
    examples=[
        "What are daily lift ticket prices?",
        "What's the best beginner slope?",
        "Do you offer ski rentals?",
        "When does the resort open for the season?"
    ],
    theme="soft"
).queue()
if __name__ == "__main__":
    demo.launch()