Spaces:
Sleeping
Sleeping
File size: 3,155 Bytes
1189722 38d1f98 1189722 38d1f98 1189722 38d1f98 1189722 38d1f98 2458c78 38d1f98 1189722 2458c78 ee8e540 38d1f98 1189722 38d1f98 1189722 38d1f98 71c6516 38d1f98 71c6516 38d1f98 1189722 38d1f98 1189722 38d1f98 1189722 ab9f28a 38d1f98 1189722 38d1f98 1189722 38d1f98 1189722 ab9f28a 1189722 38d1f98 1189722 38d1f98 440561f 38d1f98 1189722 38d1f98 440561f 38d1f98 1189722 2458c78 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 |
# Import required libraries
import gradio as gr # for building the user interface
import pandas as pd # for handling tabular data
from autogluon.tabular import TabularPredictor # AutoGluon class to load tabular models
from huggingface_hub import snapshot_download # to download model files from Hugging Face Hub
import os, zipfile # for file path operations and unzipping
import torch # model/device handling
# Hugging Face Hub repository that holds the zipped AutoGluon predictor
REPO_ID = "FaiyazAzam/24679-tabular-autolguon-predictor"

# Pull a local snapshot of the repo (cached between runs by huggingface_hub)
repo_dir = snapshot_download(repo_id=REPO_ID)

# The predictor directory ships as a zip archive inside the snapshot.
# Extract it under /tmp, which is a writable location on Spaces; skip the
# extraction if a previous run already unpacked it.
archive_path = os.path.join(repo_dir, "autogluon_predictor_dir.zip")
extract_to = "/tmp/predictor_dir"
if not os.path.exists(extract_to):
    with zipfile.ZipFile(archive_path) as archive:
        archive.extractall(extract_to)

# Re-hydrate the trained AutoGluon tabular predictor from the extracted files.
# require_py_version_match=False: the Space's Python minor version may differ
# from the one used at training time.
predictor = TabularPredictor.load(extract_to, require_py_version_match=False)
# Function that takes book dimensions + page count and returns genre prediction
def predict_tabular(height, width, depth, page_count):
    """Predict a book's genre code from its physical dimensions and page count.

    Args:
        height: Book height in cm (positive number).
        width: Book width in cm (positive number).
        depth: Book spine thickness in cm (positive number).
        page_count: Number of pages (positive integer).

    Returns:
        A result string: either the predicted genre code or a validation
        error message.
    """
    # gr.Number fields arrive as None when left blank; reject them up front —
    # otherwise the `<= 0` comparisons below raise TypeError and crash the app.
    if any(v is None for v in (height, width, depth, page_count)):
        return "Please fill in all fields."
    # Validate inputs (must be positive)
    if height <= 0 or width <= 0 or depth <= 0:
        return "Please enter positive numbers for dimensions."
    if page_count <= 0:
        return "Please enter a positive integer for page count."
    # Build a single-row DataFrame whose column names match the training features
    row = {
        "Height": height,
        "Width": width,
        "Depth": depth,
        # The UI labels this "positive integer" but gr.Number yields floats;
        # coerce so the model sees an integral page count.
        "Page Count": int(page_count),
    }
    df = pd.DataFrame([row])
    # predict() returns a pandas Series; use positional access (.iloc[0])
    # rather than label-based [0] so this works even with a non-default index.
    pred = int(predictor.predict(df).iloc[0])
    # Return the prediction as a string
    return f"Predicted Genre Code: {pred}"
# Assemble the Gradio interface
with gr.Blocks(title="Book Genre Predictor") as demo:
    gr.Markdown("## Predict the Genre of a Book (Numeric Labels)")

    # Numeric inputs for the book's physical features
    with gr.Row():
        with gr.Column():
            height = gr.Number(label="Height (cm)", info="Book height in cm")
            width = gr.Number(label="Width (cm)", info="Book width in cm")
            depth = gr.Number(label="Depth (cm)", info="Book spine thickness in cm")
            page_count = gr.Number(label="Page Count", info="Number of pages (positive integer)")

    # Textbox where the prediction (or a validation message) is displayed
    result_box = gr.Textbox(label="Result")
    predict_button = gr.Button("Predict")

    # Wire the button to the model-backed prediction function
    predict_button.click(
        predict_tabular,
        inputs=[height, width, depth, page_count],
        outputs=result_box,
    )

    # Canned inputs so visitors can exercise the model with one click
    gr.Examples(
        examples=[
            [20.1, 13.5, 1.8, 250],
            [24.0, 15.0, 2.2, 320],
            [18.5, 12.0, 1.5, 180],
        ],
        inputs=[height, width, depth, page_count],
        outputs=result_box,
    )

# Start the app; share=True additionally opens a public tunnel when run locally
demo.launch(share=True)
|