import pathlib
import shutil
import zipfile
import pandas
import gradio
import huggingface_hub
import autogluon.tabular
# Hugging Face Hub model repo that hosts the zipped AutoGluon predictor artifact.
MODEL_REPO_ID = "FaiyazAzam/24679-tabular-autolguon-predictor"
# Name of the zip file inside the repo containing the saved predictor directory.
ZIP_FILENAME = "autogluon_predictor_dir.zip"
# Local directory used to cache the downloaded artifact.
CACHE_DIR = pathlib.Path("hf_assets")
# Directory the zip is extracted into (recreated on every startup).
EXTRACT_DIR = CACHE_DIR / "predictor_native"
# Input feature columns, in the order the predictor was trained on.
FEATURE_COLS = ["Height", "Width", "Depth", "Page Count"]
# Column the predictor outputs (classification target).
TARGET_COL = "Genre"
def _prepare_predictor_dir() -> str:
    """Download and unpack the zipped AutoGluon predictor from the Hub.

    Downloads ZIP_FILENAME from MODEL_REPO_ID into CACHE_DIR, extracts it
    into a freshly-recreated EXTRACT_DIR, and returns the path to the
    predictor's root directory as a string.

    Returns:
        Path (str) suitable for TabularPredictor.load().
    """
    CACHE_DIR.mkdir(parents=True, exist_ok=True)
    # NOTE: the deprecated `local_dir_use_symlinks` argument was removed —
    # modern huggingface_hub ignores it (and warns); `local_dir` alone
    # already yields a real file copy.
    local_zip = huggingface_hub.hf_hub_download(
        repo_id=MODEL_REPO_ID,
        filename=ZIP_FILENAME,
        repo_type="model",
        local_dir=str(CACHE_DIR),
    )
    # Recreate the extraction dir from scratch so stale files from a
    # previous run cannot mix with the new artifact.
    if EXTRACT_DIR.exists():
        shutil.rmtree(EXTRACT_DIR)
    EXTRACT_DIR.mkdir(parents=True, exist_ok=True)
    with zipfile.ZipFile(local_zip, "r") as zf:
        zf.extractall(str(EXTRACT_DIR))
    # The zip may contain the predictor files directly, or a single
    # wrapping directory — unwrap the latter.
    contents = list(EXTRACT_DIR.iterdir())
    if len(contents) == 1 and contents[0].is_dir():
        return str(contents[0])
    return str(EXTRACT_DIR)
# Download/extract the artifact once at startup, then load the predictor.
PREDICTOR_DIR = _prepare_predictor_dir()
# require_py_version_match=False: the artifact may have been saved under a
# different Python minor version than the Space runtime.
PREDICTOR = autogluon.tabular.TabularPredictor.load(
    PREDICTOR_DIR, require_py_version_match=False
)
def do_predict(height, width, depth, page_count):
    """Predict a book's genre from its physical features.

    Args:
        height: Book height in cm.
        width: Book width in cm.
        depth: Book thickness in cm.
        page_count: Number of pages.

    Returns:
        dict mapping class label -> probability, sorted descending
        (the shape gradio.Label expects), or {"Error": ...} on failure.
    """
    try:
        # Bug fix: the original called validate_and_scale_inputs(), which is
        # not defined anywhere in this file — every call raised NameError and
        # fell into the except branch, so the app could never predict.
        # Coerce and sanity-check the inputs inline instead.
        values = [float(height), float(width), float(depth), float(page_count)]
        if any(v <= 0 for v in values):
            raise ValueError("All inputs must be positive numbers.")
        X = pandas.DataFrame([values], columns=FEATURE_COLS)
        # Get both prediction and probabilities
        prediction = PREDICTOR.predict(X)
        proba = PREDICTOR.predict_proba(X)
        print(f"Prediction: {prediction.iloc[0]}")
        print(f"Probabilities: {proba.iloc[0].to_dict()}")
        row0 = proba.iloc[0]
        # Sort classes by probability, most likely first.
        result = dict(
            sorted(
                ((str(cls), float(val)) for cls, val in row0.items()),
                key=lambda kv: kv[1],
                reverse=True,
            )
        )
        print(f"Final result: {result}")
        return result
    except Exception as e:
        print(f"Prediction error: {e}")
        return {"Error": f"Prediction failed: {str(e)}"}
# Representative [height_cm, width_cm, depth_cm, page_count] rows shown as
# clickable examples in the UI (same order as FEATURE_COLS).
EXAMPLES = [
    [20.0, 13.0, 3.0, 350],
    [23.0, 15.0, 5.0, 600],
    [18.0, 11.0, 2.0, 200],
]
# Build and launch the Gradio UI: sliders/number for the four physical
# features, a Label showing the top predicted genres with probabilities.
with gradio.Blocks() as demo:
    gradio.Markdown("# Predict Book Genre from Physical Features")
    gradio.Markdown("Enter book dimensions and page count to predict the genre.")
    with gradio.Row():
        height = gradio.Slider(10, 30, step=0.5, value=20.0, label="Height (cm)", info="Book height in centimeters")
        width = gradio.Slider(8, 25, step=0.5, value=13.0, label="Width (cm)", info="Book width in centimeters")
        depth = gradio.Slider(1, 10, step=0.1, value=3.0, label="Depth (cm)", info="Book thickness in centimeters")
        page_count = gradio.Number(value=350, precision=0, label="Page Count", info="Number of pages in the book")
    proba_pretty = gradio.Label(num_top_classes=5, label="Predicted Genre Probabilities")
    inputs = [height, width, depth, page_count]
    # Re-run the prediction whenever any input component changes, so the
    # label updates live without a submit button.
    for comp in inputs:
        comp.change(fn=do_predict, inputs=inputs, outputs=[proba_pretty])
    gradio.Examples(
        examples=EXAMPLES,
        inputs=inputs,
        label="Representative examples",
        cache_examples=False,
    )
demo.launch()