Update app.py
app.py CHANGED
@@ -4,7 +4,7 @@ import torch
 import os
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
-#
+# Load model from Hugging Face secret
 model_name = os.environ.get("MODEL_ID")
 
 tokenizer = AutoTokenizer.from_pretrained(model_name)
@@ -18,16 +18,8 @@ model.eval()
 SYSTEM_PROMPT = "You are an expert biomedical assistant trained to identify randomized controlled trials (RCTs). Include RCTs and exclude non-RCTs."
 
 @spaces.GPU(duration=120)
-def chat_with_llm(message, history):
-
-    chat_prompt = f"<|system|>\n{SYSTEM_PROMPT.strip()}\n"
-
-    for turn in history:
-        if len(turn) == 2:
-            user_msg, bot_reply = turn
-            chat_prompt += f"<|user|>\n{user_msg.strip()}\n<|assistant|>\n{bot_reply.strip()}\n"
-
-    chat_prompt += f"<|user|>\n{message.strip()}\n<|assistant|>\n"
+def chat_with_llm(message):
+    chat_prompt = f"<|system|>\n{SYSTEM_PROMPT.strip()}\n<|user|>\n{message.strip()}\n<|assistant|>\n"
 
     inputs = tokenizer(chat_prompt, return_tensors="pt", return_token_type_ids=False).to(model.device)
     inputs.pop("token_type_ids", None)
@@ -50,28 +42,26 @@ def chat_with_llm(message, history):
     return reply
 
 with gr.Blocks() as demo:
-    gr.Markdown("## 🧠 RCT Classifier Demonstration")
+    gr.Markdown("## 🧠 RCT Classifier Demonstration (Stateless)")
 
     chatbot = gr.Chatbot()
+
     with gr.Row():
-        title = gr.Textbox(label="Title", placeholder="Enter title")
+        title = gr.Textbox(label="Title", placeholder="Enter article title")
         abstract = gr.Textbox(label="Abstract", placeholder="Enter abstract", lines=6)
 
-    submit = gr.Button("
-
-    state = gr.State([])
+    submit = gr.Button("Classify")
 
-    def handle_submit(title_text, abstract_text, history):
+    def handle_submit(title_text, abstract_text):
         message = f"Title: {title_text.strip()}\nAbstract: {abstract_text.strip()}"
-        response = chat_with_llm(message, history)
-
-        return history, history
+        response = chat_with_llm(message)
+        return [(message, response)]  # Single-turn memory
 
     submit.click(
         fn=handle_submit,
-        inputs=[title, abstract, state],
-        outputs=[chatbot, state]
+        inputs=[title, abstract],
+        outputs=chatbot
     )
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()