Spaces:
Runtime error
import os
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# ---------------------------------------------------
# Hugging Face Token (from Space Secrets)
# ---------------------------------------------------
# May be None when the "HF_TOKEN" secret is unset; the model below is
# public, so the token only matters for gated/private repos.
HF_TOKEN = os.environ.get("HF_TOKEN")

# ---------------------------------------------------
# IBM Granite Model (Correct & Public)
# ---------------------------------------------------
MODEL_ID = "ibm-granite/granite-3.0-2b-instruct"

tokenizer = AutoTokenizer.from_pretrained(
    MODEL_ID,
    token=HF_TOKEN
)

# NOTE(review): device_map="auto" requires the `accelerate` package to be
# installed — a missing `accelerate` in requirements.txt is a common cause
# of a Spaces "Runtime error" at startup. Confirm it is listed.
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    device_map="auto",
    token=HF_TOKEN
)

# Shared text-generation pipeline used by granite_nlp() below.
generator = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=180,
    do_sample=False  # greedy decoding: deterministic output, prevents duplicated summaries
)
# ---------------------------------------------------
# Core NLP Function (STRICT OUTPUT CONTROL)
# ---------------------------------------------------
def granite_nlp(task, text):
    """Run the selected NLP task on *text* with the Granite pipeline.

    Parameters
    ----------
    task : str
        Either "Summarization" or "Classification" (the values offered by
        the UI radio button); anything else yields an error message.
    text : str | None
        User-supplied input text.

    Returns
    -------
    str
        The cleaned model output, or a warning/error message for
        empty input or an unknown task.
    """
    # Guard against None (Gradio can pass None for a cleared textbox)
    # as well as blank/whitespace-only input — the original crashed on
    # None with an AttributeError at .strip().
    if not text or not text.strip():
        return "⚠️ Please enter some text."

    if task == "Summarization":
        prompt = f"""
You are an expert summarizer.
TASK:
Summarize the text below into EXACTLY 4 concise bullet points.
RULES:
- Do NOT repeat the input text
- Do NOT include explanations
- Output ONLY the bullet points
TEXT:
{text}
SUMMARY:
"""
    elif task == "Classification":
        prompt = f"""
You are a sentiment classification expert.
TASK:
Classify the sentiment of the text below.
RULES:
- Choose ONLY one word
- Options: Positive, Negative, Neutral
- Do NOT repeat the input text
TEXT:
{text}
ANSWER:
"""
    else:
        return "Invalid task selected."

    # The HF pipeline echoes the prompt by default (return_full_text=True),
    # so the raw output is prompt + completion.
    output = generator(prompt)[0]["generated_text"]

    # ---------------- Post-processing ----------------
    # Keep only the completion after the marker. The prompt itself ends
    # with the marker, so splitting on the LAST occurrence drops the
    # echoed prompt even when the model repeats the marker.
    if "SUMMARY:" in output:
        output = output.split("SUMMARY:")[-1]
    elif "ANSWER:" in output:
        output = output.split("ANSWER:")[-1]
    return output.strip()
# ---------------------------------------------------
# Gradio UI
# ---------------------------------------------------
# NOTE(review): user-facing strings below were mojibake in the original
# (e.g. "β" for an em dash, "π§" for the brain emoji); repaired here.
with gr.Blocks(title="IBM Granite — Summarization & Classification") as demo:
    gr.Markdown(
        """
# 🧠 IBM Granite — Text Summarization & Classification
**Deployed on Hugging Face Spaces**
Select a task, paste your text, and get clean AI output.
"""
    )

    # Task selector: the two values match the branches in granite_nlp().
    task = gr.Radio(
        ["Summarization", "Classification"],
        label="Select Task",
        value="Summarization"
    )

    text_input = gr.Textbox(
        lines=12,
        label="Input Text",
        placeholder="Paste your text here..."
    )

    output = gr.Textbox(
        lines=12,
        label="Model Output",
        show_copy_button=True
    )

    btn = gr.Button("Run")
    btn.click(
        fn=granite_nlp,
        inputs=[task, text_input],
        outputs=output
    )

demo.launch()