Update app.py
app.py CHANGED
@@ -4,8 +4,8 @@ from transformers import pipeline, TextIteratorStreamer
 import torch
 import threading
 
-# Load
-model_name = "krish10/
+# Load model and tokenizer
+model_name = "krish10/Qwen3_0.6B_16bit_TA_screen"
 pipe = pipeline("text-generation", model=model_name, device=0)
 tokenizer = pipe.tokenizer
 model = pipe.model
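The pipeline is created once at import time, and its tokenizer and model are pulled out for direct use in the streaming path. A quick way to sanity-check the load before the UI is wired up (hypothetical snippet, not part of this commit; assumes a CUDA device is present, which device=0 requires):

# Hypothetical smoke test, not part of app.py: confirm the pipeline generates.
out = pipe("Title: test\nAbstract: test", max_new_tokens=8, do_sample=False)
print(out[0]["generated_text"])  # text-generation pipelines return a list of dicts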
@@ -15,13 +15,42 @@ MAX_TOKENS = 3000
 TEMPERATURE = 0.1
 TOP_P = 0.9
 
-# Generator function for streaming
 @spaces.GPU
-def respond_stream(user_input):
-
-
+def respond_stream(population, intervention, comparison, outcome, study_design, summary, title, abstract):
+    # Validate required fields
+    if not title.strip() or not abstract.strip():
+        return "❌ Error: Title and Abstract are required."
 
-
+    criteria_parts = []
+    if population.strip():
+        criteria_parts.append(f"Population of interest = {population.strip()}")
+    if intervention.strip():
+        criteria_parts.append(f"Intervention/exposure of interest = {intervention.strip()}")
+    if comparison.strip():
+        criteria_parts.append(f"Comparison of interest = {comparison.strip()}")
+    if outcome.strip():
+        criteria_parts.append(f"Outcome of interest = {outcome.strip()}")
+    if study_design.strip():
+        criteria_parts.append(f"Study design of interest = {study_design.strip()}")
+
+    if not criteria_parts:
+        return "❌ Error: At least one of the five PICOS criteria must be filled."
+
+    # Build instruction section
+    instruction = "Instruction: " + "\n".join(criteria_parts)
+
+    # Construct full prompt
+    prompt = instruction
+    if summary.strip():
+        prompt += f"\n\nPICOS Summary: {summary.strip()}"
+    prompt += f"\n\nTitle: {title.strip()}\nAbstract: {abstract.strip()}"
+
+    # Wrap into message for chat template
+    messages = [{"role": "user", "content": prompt}]
+    prompt_text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+
+    # Tokenize and prepare streamer
+    inputs = tokenizer(prompt_text, return_tensors="pt").to("cuda")
     streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
 
     generation_kwargs = dict(
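The unchanged lines elided between this hunk and the next launch generation on a background thread so the streamer can be consumed while tokens are produced. Based on the visible context (generation_kwargs = dict(, thread.start(), and the MAX_TOKENS / TEMPERATURE / TOP_P constants), that section presumably follows the standard TextIteratorStreamer pattern; an illustrative reconstruction, not the author's exact lines:

# Illustrative sketch of the elided generation setup; keyword names follow
# the standard transformers generate() API and may differ from app.py.
generation_kwargs = dict(
    **inputs,
    streamer=streamer,
    max_new_tokens=MAX_TOKENS,
    temperature=TEMPERATURE,
    top_p=TOP_P,
    do_sample=True,
)
thread = threading.Thread(target=model.generate, kwargs=generation_kwargs)
thread.start()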
@@ -38,30 +67,35 @@ def respond_stream(user_input):
     thread.start()
 
     partial_text = ""
-    for
-        partial_text +=
+    for token in streamer:
+        partial_text += token
         yield partial_text
 
-# Gradio interface
+# Build Gradio interface
 with gr.Blocks() as demo:
-    gr.Markdown("## 🤖 Qwen Streaming Chat
+    gr.Markdown("## 🤖 Qwen Streaming Chat — Structured Medical Abstract Evaluation")
 
-
-        lines=
-        label="
-
-
+    with gr.Column():
+        population = gr.Textbox(label="Population of interest", lines=1)
+        intervention = gr.Textbox(label="Intervention/exposure of interest", lines=1)
+        comparison = gr.Textbox(label="Comparison of interest", lines=1)
+        outcome = gr.Textbox(label="Outcome of interest", lines=1)
+        study_design = gr.Textbox(label="Study design of interest", lines=1)
 
-
+    with gr.Column():
+        summary = gr.Textbox(label="PICOS Summary (optional)", lines=4)
+        title = gr.Textbox(label="Title", lines=2, placeholder="Required")
+        abstract = gr.Textbox(label="Abstract", lines=10, placeholder="Required")
 
+    output_box = gr.Textbox(label="Model Response", lines=15, interactive=False)
     generate_btn = gr.Button("Generate")
 
     generate_btn.click(
         fn=respond_stream,
-        inputs=[
+        inputs=[population, intervention, comparison, outcome, study_design, summary, title, abstract],
         outputs=[output_box]
     )
 
-# Launch
+# Launch the app
 if __name__ == "__main__":
     demo.launch()
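Since respond_stream is a generator and is passed straight to generate_btn.click, Gradio streams each yielded partial_text into output_box as it arrives. The generator can also be exercised without the UI; a hypothetical local check with placeholder PICOS values (not from this commit):

# Hypothetical local check of the streaming generator (placeholder inputs).
final = ""
for partial in respond_stream(
    population="adults with type 2 diabetes",
    intervention="SGLT2 inhibitors",
    comparison="placebo",
    outcome="change in HbA1c",
    study_design="randomized controlled trial",
    summary="",
    title="Example title of a candidate abstract",
    abstract="Example abstract text to screen against the PICOS criteria.",
):
    final = partial  # each yield is the cumulative text so far
print(final)  # the last yield is the complete model response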
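For completeness, lines 1-3 of app.py fall outside every hunk. The first hunk header confirms the transformers import; the other two imports below are inferred from the names used in the visible code (gr, spaces), so treat this as a presumed file header rather than a verbatim one:

import gradio as gr   # used as gr throughout the UI section (inferred)
import spaces         # provides the @spaces.GPU decorator (inferred)
from transformers import pipeline, TextIteratorStreamer  # confirmed by the first hunk header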