# Source: Hugging Face Space (the Space page header read "Spaces: / Runtime error")
| import streamlit as st | |
| import torch | |
| from transformers import pipeline | |
| # βββββββββββββββββββββ | |
| # Models | |
| # βββββββββββββββββββββ | |
| PRIMARY_MODEL = "AventIQ-AI/t5-medical-chatbot" | |
| FALLBACK_MODEL = "Anjanams04/healthbot" | |
@st.cache_resource(show_spinner="Loading model…")
def load_pipeline(model_name: str):
    """Load a text2text-generation pipeline for *model_name*.

    Runs on GPU (device 0) when CUDA is available, otherwise CPU (-1).
    Cached with ``st.cache_resource`` so the model is downloaded and
    initialised once per server process instead of on every button click.

    Returns:
        The transformers pipeline, or ``None`` if loading failed
        (the error is shown in the Streamlit UI).

    NOTE(review): a ``None`` result is also cached, so a transient load
    failure will not be retried until the cache is cleared — acceptable
    for a demo, but worth confirming for production use.
    """
    device = 0 if torch.cuda.is_available() else -1
    try:
        return pipeline(
            "text2text-generation",
            model=model_name,
            tokenizer=model_name,
            device=device,
        )
    except Exception as e:  # broad on purpose: any load failure triggers the fallback model
        st.error(f"Error loading {model_name}: {e}")
        return None
# ---------------------------------------------------------------------------
# Streamlit UI
# ---------------------------------------------------------------------------
st.set_page_config(page_title="Medical Chatbot Demo", page_icon="🩺")
st.title("🩺 Medical Chatbot Demo")
st.caption("For demonstration only — not medical advice.")

symptoms = st.text_area(
    "Describe your symptoms:",
    value="I have flu, body pain and runny nose",
)

if st.button("Get Suggestion"):
    if not symptoms.strip():
        # Guard: empty/whitespace-only input gets a warning, no model call.
        st.warning("Please enter symptoms.")
    else:
        # Try the primary model first; fall back to the secondary on failure.
        med_pipe = load_pipeline(PRIMARY_MODEL)
        if med_pipe is None:
            med_pipe = load_pipeline(FALLBACK_MODEL)
        if med_pipe is None:
            st.error("Could not load any model.")
        else:
            # Structured prompt so the T5-style model emits the four sections.
            prompt = (
                f"Patient symptoms: {symptoms}\n"
                "Provide: 1) Likely condition(s) 2) Non-prescription remedies "
                "3) Precautions & red flags 4) Disclaimer."
            )
            try:
                # Deterministic decoding (do_sample=False) for a demo.
                result = med_pipe(prompt, max_length=200, do_sample=False)
                reply = result[0]["generated_text"].strip()
                st.subheader("💡 Suggested Response")
                st.write(reply)
            except Exception as e:
                st.error(f"Model inference failed: {e}")