kirubel1738 committed
Commit ac9c332 · verified · 1 Parent(s): c52545d

Update src/streamlit_app.py

Files changed (1):
  src/streamlit_app.py +150 -34
src/streamlit_app.py CHANGED
@@ -1,40 +1,156 @@
-import altair as alt
-import numpy as np
-import pandas as pd
 import streamlit as st
 
-"""
-# Welcome to Streamlit!
-
-Edit `/streamlit_app.py` to customize this app to your heart's desire :heart:.
-If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
-forums](https://discuss.streamlit.io).
-
-In the meantime, below is an example of what you can do with just a few lines of code:
 """
 
-num_points = st.slider("Number of points in spiral", 1, 10000, 1100)
-num_turns = st.slider("Number of turns in spiral", 1, 300, 31)
-
-indices = np.linspace(0, 1, num_points)
-theta = 2 * np.pi * num_turns * indices
-radius = indices
-
-x = radius * np.cos(theta)
-y = radius * np.sin(theta)
-
-df = pd.DataFrame({
-    "x": x,
-    "y": y,
-    "idx": indices,
-    "rand": np.random.randn(num_points),
-})
-
-st.altair_chart(alt.Chart(df, height=700, width=700)
-    .mark_point(filled=True)
-    .encode(
-        x=alt.X("x", axis=None),
-        y=alt.Y("y", axis=None),
-        color=alt.Color("idx", legend=None, scale=alt.Scale()),
-        size=alt.Size("rand", legend=None, scale=alt.Scale(range=[1, 150])),
-    ))
 
+# streamlit_app.py
+import os
+import json
+import time
+
+# -----------------------------
+# IMPORTANT: set cache dirs BEFORE importing transformers/huggingface_hub
+# -----------------------------
+os.environ.setdefault("HF_HOME", "/tmp/huggingface")
+os.environ.setdefault("TRANSFORMERS_CACHE", "/tmp/huggingface/transformers")
+os.environ.setdefault("HF_DATASETS_CACHE", "/tmp/huggingface/datasets")
+os.environ.setdefault("HUGGINGFACE_HUB_CACHE", "/tmp/huggingface/hub")
+os.environ.setdefault("XDG_CACHE_HOME", "/tmp/huggingface")
+os.environ.setdefault("HOME", "/tmp")
+
+# create cache dirs (best-effort)
+for d in [os.environ["HF_HOME"], os.environ["TRANSFORMERS_CACHE"], os.environ["HF_DATASETS_CACHE"], os.environ["HUGGINGFACE_HUB_CACHE"]]:
+    try:
+        os.makedirs(d, exist_ok=True)
+        os.chmod(d, 0o777)
+    except Exception:
+        pass
+
 import streamlit as st
+import requests
+
+# Optional heavy imports will be inside local-model branch
+LOCAL_MODE = os.environ.get("USE_LOCAL_MODEL", "0") == "1"
+
+# default model id the user provided; keep as-is
+DEFAULT_MODEL_ID = "kirubel1738/biogpt-pubmedqa-finetuned"
+
+st.set_page_config(page_title="BioGPT (PubMedQA) demo", layout="centered")
+
+st.title("BioGPT — PubMedQA demo")
+st.caption("Defaults to the Hugging Face Inference API (recommended for Spaces / CPU).")
+
+st.markdown(
+"""
+**How it works**
+- By default the app will call Hugging Face's Inference API for the model you specify (fast and avoids memory issues).
+- If you set `USE_LOCAL_MODEL=1` in your environment, the app will attempt to load the model locally using `transformers` (only for GPUs/large-memory machines).
 """
+)
+
+col1, col2 = st.columns([3, 1])
+
+with col1:
+    model_id = st.text_input("Model repo id", value=DEFAULT_MODEL_ID, help="Hugging Face repo id (e.g. username/modelname).")
+    prompt = st.text_area("Question / prompt", height=180, placeholder="Enter a PubMed-style question or prompt...")
+with col2:
+    max_new_tokens = st.slider("Max new tokens", 16, 1024, 128)
+    temperature = st.slider("Temperature", 0.0, 1.5, 0.0, step=0.05)
+    method = st.radio("Run method", ("Inference API (recommended)", "Local model (heavy)"), index=0)
+
+# override radio if user set USE_LOCAL_MODEL env var
+if LOCAL_MODE:
+    method = "Local model (heavy)"
+
+hf_token = os.environ.get("HUGGINGFACE_HUB_TOKEN") or os.environ.get("HF_TOKEN") or os.environ.get("HUGGINGFACE_API_TOKEN")
+
+def call_inference_api(model_id: str, prompt: str, max_new_tokens: int, temperature: float):
+    """
+    Simple POST to the Hugging Face Inference API.
+    If you want to use the InferenceClient from huggingface_hub you can swap this.
+    """
+    api_url = f"https://api-inference.huggingface.co/models/{model_id}"
+    headers = {"Authorization": f"Bearer {hf_token}"} if hf_token else {}
+    parameters = {"max_new_tokens": max_new_tokens}
+    if temperature > 0:
+        # some generation backends reject temperature == 0; omit it for greedy decoding
+        parameters["temperature"] = temperature
+    payload = {
+        "inputs": prompt,
+        "parameters": parameters,
+        "options": {"wait_for_model": True},
+    }
+    try:
+        r = requests.post(api_url, headers=headers, json=payload, timeout=120)
+    except Exception as e:
+        return False, f"Request failed: {e}"
+    if r.status_code != 200:
+        try:
+            error = r.json()
+        except Exception:
+            error = r.text
+        return False, f"API error ({r.status_code}): {error}"
+    try:
+        resp = r.json()
+        # handle several possible response schemas
+        if isinstance(resp, dict) and "error" in resp:
+            return False, resp["error"]
+        # often it's a list of dicts with 'generated_text'
+        if isinstance(resp, list):
+            out_texts = []
+            for item in resp:
+                if isinstance(item, dict):
+                    # common key: 'generated_text'
+                    for k in ("generated_text", "text", "content"):
+                        if k in item:
+                            out_texts.append(item[k])
+                            break
+                    else:
+                        out_texts.append(json.dumps(item))
+                else:
+                    out_texts.append(str(item))
+            return True, "\n\n".join(out_texts)
+        # fallback
+        return True, str(resp)
+    except Exception as e:
+        return False, f"Could not parse response: {e}"
+
+# Local model loader (only if method chosen)
+generator = None
+if method.startswith("Local"):
+    st.warning("Local model mode selected — this requires transformers + torch and lots of RAM/GPU. Only use if you know the model fits your hardware.")
+    try:
+        from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+        import torch
+
+        device = 0 if torch.cuda.is_available() else -1
+        st.info(f"torch.cuda.is_available={torch.cuda.is_available()} -- device set to {device}")
+        with st.spinner("Loading tokenizer & model (this can take a while)..."):
+            tokenizer = AutoTokenizer.from_pretrained(model_id, cache_dir=os.environ.get("TRANSFORMERS_CACHE"))
+            model = AutoModelForCausalLM.from_pretrained(model_id, cache_dir=os.environ.get("TRANSFORMERS_CACHE"), low_cpu_mem_usage=True)
+            generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device=device)
+    except Exception as e:
+        st.error(f"Local model load failed: {e}")
+        st.stop()
+
+if st.button("Generate"):
+    if not prompt or prompt.strip() == "":
+        st.error("Please enter a prompt.")
+        st.stop()
+
+    if method.startswith("Inference"):
+        if (DEFAULT_MODEL_ID in model_id) and not hf_token:
+            st.info("If the model is private or rate-limited, set HUGGINGFACE_HUB_TOKEN as a secret in Spaces or as an env var locally.")
+        with st.spinner("Querying Hugging Face Inference API..."):
+            ok, out = call_inference_api(model_id, prompt, max_new_tokens, float(temperature))
+        if not ok:
+            st.error(out)
+        else:
+            st.success("Done")
+            st.text_area("Model output", value=out, height=320)
+    else:
+        # local model generation
+        try:
+            with st.spinner("Running local generation..."):
+                gen_kwargs = {"max_new_tokens": max_new_tokens}
+                if temperature > 0:
+                    # do_sample with temperature == 0 raises in transformers, so only sample when it is positive
+                    gen_kwargs["do_sample"] = True
+                    gen_kwargs["temperature"] = temperature
+                results = generator(prompt, **gen_kwargs)
+            if isinstance(results, list) and len(results) > 0 and "generated_text" in results[0]:
+                out = results[0]["generated_text"]
+            else:
+                out = str(results)
+            st.success("Done")
+            st.text_area("Model output", value=out, height=320)
+        except Exception as e:
+            st.error(f"Local generation failed: {e}")
+
+st.markdown("---")
+st.caption("If you run into permission errors in Spaces, ensure the HF cache env vars above point to a writable directory (we already set them to /tmp/huggingface in this container).")