import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
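# Dependencies assumed by this file: gradio, transformers, torch,
# accelerate (needed for device_map="auto"), and sacremoses (required
# by the BioGPT tokenizer).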
# Choose the model you want to host.
# You can swap in another causal LM, but note that a quantized checkpoint such as
# "TheBloke/meditron-7B-GPTQ" needs extra dependencies (e.g. auto-gptq) to load.
model_name = "microsoft/biogpt"
# Load tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
    device_map="auto",  # lets accelerate place the weights on GPU/CPU automatically
)
# Function that runs the model (inference)
def smart_health_predictor(prompt):
    # Add context to guide the model
    formatted_prompt = f"Question: {prompt}\nAnswer:"
    inputs = tokenizer(formatted_prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=200,
        temperature=0.7,
        top_p=0.9,
        do_sample=True,
        repetition_penalty=1.2,
        eos_token_id=tokenizer.eos_token_id,
    )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Remove the original question from the model's output
    if "Answer:" in response:
        response = response.split("Answer:")[-1].strip()
    return response
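# Minimal Gradio interface wiring the predictor to a text-in/text-out UI.
# The `app` object launched below was never defined in the original file,
# so this block is an assumed reconstruction; the labels and title are
# illustrative choices, not taken from the source.
app = gr.Interface(
    fn=smart_health_predictor,
    inputs=gr.Textbox(label="Ask a health question", lines=3),
    outputs=gr.Textbox(label="Answer"),
    title="Smart Health Predictor",
)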
# Run the app
if __name__ == "__main__":
    app.launch()