import os
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
# ---------------------------------------------------
# Hugging Face Token (from Space Secrets)
# ---------------------------------------------------
# Token injected via the Space's secret store; None when unset, in which
# case from_pretrained falls back to anonymous (public) access.
HF_TOKEN = os.environ.get("HF_TOKEN")
# ---------------------------------------------------
# IBM Granite Model (Correct & Public)
# ---------------------------------------------------
# Public IBM Granite 3.0 2B instruction-tuned checkpoint on the Hub.
MODEL_ID = "ibm-granite/granite-3.0-2b-instruct"
# Tokenizer paired with the same checkpoint.
tokenizer = AutoTokenizer.from_pretrained(
MODEL_ID,
token=HF_TOKEN
)
# device_map="auto" lets transformers/accelerate place weights on GPU when
# one is available, falling back to CPU otherwise.
model = AutoModelForCausalLM.from_pretrained(
MODEL_ID,
device_map="auto",
token=HF_TOKEN
)
# Shared text-generation pipeline reused for every request.
generator = pipeline(
"text-generation",
model=model,
tokenizer=tokenizer,
max_new_tokens=180,
do_sample=False # greedy (deterministic) decoding — avoids duplicated/varying summaries
)
# ---------------------------------------------------
# Core NLP Function (STRICT OUTPUT CONTROL)
# ---------------------------------------------------
def granite_nlp(task, text):
    """Run a single NLP task with the Granite pipeline.

    Parameters
    ----------
    task : str
        Either "Summarization" or "Classification" (from the Gradio radio).
    text : str or None
        User-supplied input; Gradio may deliver None for a cleared textbox.

    Returns
    -------
    str
        Cleaned model output, or a user-facing warning/error message.
    """
    # Guard None before .strip(): a cleared Gradio textbox can yield None,
    # which would otherwise raise AttributeError here.
    if not text or not text.strip():
        return "β οΈ Please enter some text."
    if task == "Summarization":
        marker = "SUMMARY:"
        prompt = f"""
You are an expert summarizer.
TASK:
Summarize the text below into EXACTLY 4 concise bullet points.
RULES:
- Do NOT repeat the input text
- Do NOT include explanations
- Output ONLY the bullet points
TEXT:
{text}
SUMMARY:
"""
    elif task == "Classification":
        marker = "ANSWER:"
        prompt = f"""
You are a sentiment classification expert.
TASK:
Classify the sentiment of the text below.
RULES:
- Choose ONLY one word
- Options: Positive, Negative, Neutral
- Do NOT repeat the input text
TEXT:
{text}
ANSWER:
"""
    else:
        return "Invalid task selected."
    output = generator(prompt)[0]["generated_text"]
    # ---------------- Post-processing ----------------
    # Split on the marker belonging to the SELECTED task. The old code probed
    # for "SUMMARY:" first, so classification input whose user text happened
    # to contain "SUMMARY:" was split at the wrong place.
    if marker in output:
        output = output.split(marker)[-1]
    return output.strip()
# ---------------------------------------------------
# Gradio UI
# ---------------------------------------------------
# Layout: task selector, input/output textboxes, and a Run button wired to
# granite_nlp. Launched immediately at import time (standard for Spaces).
with gr.Blocks(title="IBM Granite β Summarization & Classification") as demo:
    gr.Markdown(
        """
# π§ IBM Granite β Text Summarization & Classification
**Deployed on Hugging Face Spaces**
Select a task, paste your text, and get clean AI output.
"""
    )
    # Which NLP task to run; defaults to summarization.
    task_choice = gr.Radio(
        choices=["Summarization", "Classification"],
        label="Select Task",
        value="Summarization",
    )
    # Free-form user text to process.
    source_text = gr.Textbox(
        label="Input Text",
        lines=12,
        placeholder="Paste your text here...",
    )
    # Read-only result area with a copy button.
    result_box = gr.Textbox(
        label="Model Output",
        lines=12,
        show_copy_button=True,
    )
    run_button = gr.Button("Run")
    run_button.click(
        granite_nlp,
        inputs=[task_choice, source_text],
        outputs=result_box,
    )
demo.launch()