# Hugging Face Spaces page artifact (status banner: "Sleeping") — not part of app.py.
# Let's launch Sagar's demo on the web.
# The Space's YAML metadata looks for app.py by default.
# app.py: import packages, define the Gradio function, create the demo, run it with demo.launch().
# (%%writefile cell magic was used in the notebook to write this file.)
# Create the model demo as a Gradio app: input -> model -> output.
# Steps for the inference function:
# 1. Take an input string.
# 2. Set up a text-classification pipeline.
# 3. Get the output from the pipeline.
# 4. Return the output from step 3 as a formatted dictionary with the format
#    {"label_1": probability_1, "label_2": probability_2}.
| ###################################################################################################################### | |
# Import necessary packages (stdlib first, then third-party, per PEP 8).
import os
import pprint
from pathlib import Path
from typing import Dict, List

import numpy as np
import torch

import datasets
import evaluate
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from transformers import TrainingArguments, Trainer
from transformers import pipeline

# Read the Hugging Face access token from the 'HF_TOKEN' secret added in the
# Space's Settings tab; os.getenv returns None when the secret is absent.
hf_token = os.getenv("HF_TOKEN")
# 1. Build a text-classification pipeline from the fine-tuned checkpoint
#    (model path copied from the Hugging Face Hub).
food_not_food_pipeline = pipeline(
    model="Quantum-Monk/learn_hf_food_not_food_text_classfier-distilbert-base-uncased",
    task="text-classification",
    batch_size=32,
    device=0 if torch.cuda.is_available() else -1,  # first GPU when available, else CPU
    top_k=None,  # None -> return a score for every label, not just the best one
)
# 2. Wrap the pipeline in a plain function so it can be reused elsewhere.
def food_not_food_classifier(text: str) -> List[Dict[str, float]]:
    """Run the food/not-food pipeline on *text*.

    Returns the raw per-label results for this single input: a list of
    {"label": ..., "score": ...} dicts (all labels, since the pipeline was
    created with top_k=None).
    """
    # The pipeline returns a list of results (one per input); [0] selects
    # the result for our single string.
    return food_not_food_pipeline(text)[0]
# 4. Smoke-test the classifier on a sample sentence and show the raw output.
result = food_not_food_classifier("Yo we're building a local demo!")
print(result)
| #format output for gradio | |
| import gradio as gr | |
# 1. Classifier wrapped for Gradio: gr.Label expects a {label: probability} dict.
def food_not_food_classifier(text: str) -> Dict[str, float]:
    """Classify *text* and return {label: score} for a Gradio Label output.

    Uses the module-level ``food_not_food_pipeline`` (built with top_k=None,
    so every label is scored). The leading ``[0]`` selects the result list
    for this single input string.
    """
    raw_outputs = food_not_food_pipeline(text)[0]
    # 2. Flatten the list of {"label", "score"} dicts into the mapping
    #    shape the gr.Label component renders.
    return {item["label"]: item["score"] for item in raw_outputs}
# 3. Create the Gradio interface.
# NOTE(review): the original defined a misspelled `desciption` variable that was
# never used (a shorter literal was passed instead). It is now spelled correctly
# and actually supplied to the interface, as originally intended.
description = (
    "A text classifier to determine if a sentence is about food or not food, "
    "fine-tuned from a DistilBERT Hugging Face model and dataset. "
    "Model repo: https://huggingface.co/Quantum-Monk/learn_hf_food_not_food_text_classfier-distilbert-base-uncased "
    "Space: https://huggingface.co/spaces/Quantum-Monk/MyWorkSS"
)
demo = gr.Interface(
    fn=food_not_food_classifier,
    inputs="text",
    outputs=gr.Label(num_top_classes=2),  # show the two labels with their scores
    title="Food Not Food Classifier",
    description=description,
    examples=[
        ["I whipped up a fresh batch of code, but it seems to have a syntax error"],
        ["A pancake plate of ice cream"],
    ],
)
# Create a local demos/ tree (sibling of the current working directory) plus a
# subfolder holding the food_not_food text-classifier demo files.
from pathlib import Path

demos_dir = Path("../demos")
demos_dir.mkdir(exist_ok=True)

food_not_food_text_classifier_demo_dir = demos_dir / "food_not_food_text_classifier"
food_not_food_text_classifier_demo_dir.mkdir(exist_ok=True)
# 4. Launch the Gradio interface when executed as a script
#    (Hugging Face Spaces runs app.py directly, so this fires there too).
if __name__ == "__main__":
    demo.launch()