Spaces:
Sleeping
Sleeping
github-actions committed on
Commit ·
b60937b
1
Parent(s): ce60622
Sync from GitHub 2025-09-07T21:40:05Z
Browse files
- hf-space/hf-space/.github/workflows/sync-to-hf.yml +34 -0
- hf-space/hf-space/app.py +62 -0
- hf-space/hf-space/hf-space/.gitattributes +35 -0
- hf-space/hf-space/hf-space/README.md +13 -0
- hf-space/hf-space/requirements.txt +1 -0
- hf-space/inference_api.py +15 -0
- hf-space/inference_local.py +12 -0
- hf-space/requirements.txt +4 -1
- inference_local.py +5 -3
hf-space/hf-space/.github/workflows/sync-to-hf.yml
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Mirror this repository to a Hugging Face Space on every push to main.
name: Sync to Hugging Face Space

on:
  push:
    branches: [ main ]

jobs:
  sync:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout GitHub repo
        uses: actions/checkout@v4

      - name: Set up Git
        run: |
          git config --global user.email "actions@github.com"
          git config --global user.name "github-actions"

      - name: Mirror to Hugging Face Space
        env:
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
        run: |
          SPACE="mandar1007/cs1-team20"
          rm -rf hf-space
          git clone https://user:$HF_TOKEN@huggingface.co/spaces/$SPACE hf-space
          # BUG FIX: exclude the clone target itself. Without this, rsync
          # copies the freshly cloned hf-space/ directory back into itself,
          # so every sync nests another hf-space/hf-space/... level inside
          # the Space repo (exactly what this commit's file list shows).
          rsync -av --exclude ".git" --exclude "hf-space" ./ hf-space/
          cd hf-space
          git add -A
          if git diff --cached --quiet; then
            echo "No changes to commit."
          else
            git commit -m "Sync from GitHub $(date -u +%Y-%m-%dT%H:%M:%SZ)"
            git push origin main
          fi
hf-space/hf-space/app.py
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
import time
|
| 3 |
+
from inference_api import run_inference_api
|
| 4 |
+
from inference_local import run_inference_local
|
| 5 |
+
|
def predict(text, backend):
    """Run sentiment analysis on *text* with the selected backend.

    Returns a ``(result, latency)`` pair: the backend's prediction payload
    (or an ``{"error": ...}`` dict) and a human-readable latency string.
    Empty/whitespace-only input short-circuits with an error payload and
    an empty latency string.
    """
    cleaned = (text or "").strip()
    if not cleaned:
        return {"error": "Please enter some text."}, ""

    # Dispatch to the remote API client or the in-process pipeline.
    runner = run_inference_api if backend == "API (InferenceClient)" else run_inference_local
    started = time.perf_counter()
    outcome = runner(cleaned)
    elapsed_ms = (time.perf_counter() - started) * 1000
    return outcome, f"{elapsed_ms:.2f} ms"
| 18 |
+
|
# Gradio UI: input column (text box, backend picker, examples, buttons) on
# the left; prediction JSON and latency readout on the right.
with gr.Blocks(theme=gr.themes.Base()) as demo:
    gr.Markdown(
        """
# Case Study 1 — Sentiment Analysis
This application demonstrates two approaches to running a machine learning model:
1) Using a remote API (Hugging Face Inference Client), and
2) Running the model locally within this Space using a Transformers pipeline.

Enter text, choose the backend, and compare performance and results.
"""
    )

    with gr.Row():
        with gr.Column(scale=1):
            text_input = gr.Textbox(
                label="Input Text",
                placeholder="Type a sentence or review...",
                lines=3
            )
            backend = gr.Radio(
                ["API (InferenceClient)", "Local (Transformers pipeline)"],
                value="API (InferenceClient)",
                label="Select Backend"
            )
            examples = gr.Examples(
                examples=[
                    ["I really enjoyed this product.", "API (InferenceClient)"],
                    ["The service was disappointing.", "Local (Transformers pipeline)"],
                ],
                inputs=[text_input, backend],
                label="Example Inputs"
            )
            submit_btn = gr.Button("Analyze Sentiment")
            clear_btn = gr.Button("Clear")

        with gr.Column(scale=1):
            output = gr.JSON(label="Prediction Result")
            latency = gr.Textbox(label="Latency", interactive=False)

    submit_btn.click(fn=predict, inputs=[text_input, backend], outputs=[output, latency])
    # BUG FIX: the original Clear handler reset text/backend/latency but left
    # the previous prediction visible in the JSON panel. Reset it to None too.
    clear_btn.click(
        fn=lambda: ("", "API (InferenceClient)", None, ""),
        outputs=[text_input, backend, output, latency],
    )

if __name__ == "__main__":
    demo.launch()
hf-space/hf-space/hf-space/.gitattributes
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 28 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
hf-space/hf-space/hf-space/README.md
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: Cs1 Team20
|
| 3 |
+
emoji: 🌖
|
| 4 |
+
colorFrom: gray
|
| 5 |
+
colorTo: blue
|
| 6 |
+
sdk: gradio
|
| 7 |
+
sdk_version: 5.44.1
|
| 8 |
+
app_file: app.py
|
| 9 |
+
pinned: false
|
| 10 |
+
short_description: Gradio app demonstrating local vs API-based ML inference
|
| 11 |
+
---
|
| 12 |
+
|
| 13 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
hf-space/hf-space/requirements.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
gradio>=4.0.0
|
hf-space/inference_api.py
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os

from huggingface_hub import InferenceClient

# Public sentiment model served by the hosted Inference API; the model id
# can be overridden via the HF_API_MODEL_ID environment variable.
MODEL_ID = os.getenv("HF_API_MODEL_ID", "distilbert-base-uncased-finetuned-sst-2-english")
# Optional auth token; the default model is public, so None is acceptable.
HF_TOKEN = os.getenv("HF_TOKEN")

# One shared client for the module so connections are reused across calls.
_client = InferenceClient(model=MODEL_ID, token=HF_TOKEN)

def run_inference_api(text: str):
    """Classify *text* via the remote Inference API.

    Returns the client's prediction (a list of ``{label, score}`` items),
    or an ``{"error": ...}`` dict if the call fails for any reason —
    failures are surfaced to the UI as data rather than raised.
    """
    try:
        prediction = _client.text_classification(text)
    except Exception as exc:  # deliberate boundary: report, don't crash the app
        return {"error": str(exc)}
    return prediction
hf-space/inference_local.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from transformers import pipeline

# Build the sentiment pipeline once at import time so repeated calls do not
# reload the model weights.
_classifier = pipeline(
    "sentiment-analysis",
    model="distilbert-base-uncased-finetuned-sst-2-english"
)

def run_inference_local(text: str):
    """Classify *text* with the in-process Transformers pipeline.

    Returns the pipeline output (a list of ``{label, score}`` items), or an
    ``{"error": ...}`` dict if inference fails — errors are reported as data
    so the UI never crashes.
    """
    try:
        prediction = _classifier(text)
    except Exception as exc:  # deliberate boundary: report, don't raise
        return {"error": str(exc)}
    return prediction
hf-space/requirements.txt
CHANGED
|
@@ -1 +1,4 @@
|
|
| 1 |
-
gradio>=4.0.0
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
gradio>=4.0.0
|
| 2 |
+
transformers>=4.42.0
|
| 3 |
+
huggingface_hub>=0.23.0
|
| 4 |
+
--extra-index-url https://download.pytorch.org/whl/cpu
torch
|
inference_local.py
CHANGED
|
@@ -1,10 +1,12 @@
|
|
| 1 |
from transformers import pipeline
|
| 2 |
|
| 3 |
-
|
| 4 |
-
|
|
|
|
|
|
|
| 5 |
|
| 6 |
def run_inference_local(text: str):
|
| 7 |
try:
|
| 8 |
return _classifier(text) # list of {label, score}
|
| 9 |
except Exception as e:
|
| 10 |
-
return {"error": str(e)}
|
|
|
|
| 1 |
from transformers import pipeline
|
| 2 |
|
| 3 |
+
_classifier = pipeline(
|
| 4 |
+
"sentiment-analysis",
|
| 5 |
+
model="distilbert-base-uncased-finetuned-sst-2-english"
|
| 6 |
+
)
|
| 7 |
|
| 8 |
def run_inference_local(text: str):
|
| 9 |
try:
|
| 10 |
return _classifier(text) # list of {label, score}
|
| 11 |
except Exception as e:
|
| 12 |
+
return {"error": str(e)}
|