import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Choose the model you want to host.
# You can swap in another model, e.g. a quantized checkpoint such as
# "TheBloke/meditron-7B-GPTQ" for faster inference (note that GPTQ models
# need extra dependencies such as auto-gptq).
model_name = "microsoft/biogpt"


# Load tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
    device_map="auto"
)
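# Note: device_map="auto" relies on the `accelerate` package being installed;
# without a GPU, the model loads in float32 on CPU, which is noticeably slower.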

# Function that runs the model (inference)
def smart_health_predictor(prompt):
    # Add context to guide the model
    formatted_prompt = f"Question: {prompt}\nAnswer:"
    
    inputs = tokenizer(formatted_prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=200,
        temperature=0.7,
        top_p=0.9,
        do_sample=True,
        repetition_penalty=1.2,
        eos_token_id=tokenizer.eos_token_id
    )
    
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    
    # Remove the original question from the model's output
    if "Answer:" in response:
        response = response.split("Answer:")[-1].strip()
    
    return response
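
# Quick sanity check without the UI (illustrative question, assuming the
# model loaded successfully):
#   print(smart_health_predictor("What are the common symptoms of anemia?"))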


# Build the Gradio interface: wire the predictor function to a simple
# text-in/text-out UI (the labels and title here are illustrative defaults).
app = gr.Interface(
    fn=smart_health_predictor,
    inputs=gr.Textbox(label="Question", placeholder="Ask a biomedical question..."),
    outputs=gr.Textbox(label="Answer"),
    title="Smart Health Predictor"
)

# Run the app
if __name__ == "__main__":
    app.launch()