Spaces:
Sleeping
Sleeping
File size: 501 Bytes
fec4f5a |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 |
import gradio as gr
from huggingface_hub import InferenceClient
# Hugging Face Hub model ID used for every completion request below.
MODEL = "epfl-llm/meditron-7b"
def chat(message, history, hf_token):
    """Gradio chat handler: forward the user's message to the Meditron model.

    Args:
        message: Latest user message from the chat box.
        history: Prior chat turns supplied by ``ChatInterface`` (unused).
        hf_token: Token object exposing a ``.token`` attribute
            (presumably a ``gr.OAuthToken`` — confirm against the UI wiring);
            falsy when the user is not logged in.

    Returns:
        The model's generated text, or a login prompt when no token is given.
    """
    # Guard clause: without credentials the Inference API cannot be called.
    if not hf_token:
        return "Please log in"
    api = InferenceClient(token=hf_token.token, model=MODEL)
    prompt = f"Medical tutor: {message}"
    return api.text_generation(prompt, max_new_tokens=150)
# Build and launch the chat UI on port 7860 (the standard Spaces port).
# NOTE(review): `chat` declares an extra `hf_token` parameter beyond the
# (message, history) pair ChatInterface normally passes — presumably it is
# injected by a Hugging Face OAuth/login integration; confirm in deployment.
# Fixed: removed a stray trailing " |" artifact that made this line a
# syntax error in the extracted source.
gr.ChatInterface(
    chat,
    title="🩺 Medical Tutor",
    examples=["Explain how vaccines work", "What is DNA?"],
).launch(server_port=7860)