Spaces:
Sleeping
Sleeping
File size: 1,241 Bytes
a3de2b3 efd84c8 4bdadb5 efd84c8 ad29741 9b7003c ebcb811 9b7003c a3de2b3 c464515 a3de2b3 aa253a6 c6b27bf a3de2b3 aa253a6 8209ae5 aa253a6 c6b27bf a3de2b3 aa253a6 8209ae5 aa253a6 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 |
import gradio as gr
import joblib
import pandas as pd
from huggingface_hub import hf_hub_download
import joblib
import os
# Read the Hugging Face access token from the environment. Direct indexing
# (rather than .get) makes a missing token fail fast with a KeyError.
hf_token = os.environ["hf_token"]

# Download the serialized classifier from the Hub. `token=` replaces the
# deprecated `use_auth_token=` parameter in recent huggingface_hub releases.
model_path = hf_hub_download(
    repo_id="kgemera/Biomedic_Text_Classifier",
    filename="classification_model.pkl",
    token=hf_token,
)

# Load the model from the absolute cache path returned by the download helper.
# NOTE(review): joblib.load unpickles arbitrary objects — acceptable only
# because the model repo is trusted.
model = joblib.load(model_path)
# Single prediction
def predict_single(title, abstract):
    """Classify one article from its title and abstract.

    Wraps the two strings in a one-row DataFrame (the input format the
    fitted pipeline expects) and returns the single predicted label.
    """
    record = {"title": title, "abstract": abstract}
    frame = pd.DataFrame([record])
    predictions = model.predict(frame)
    return predictions[0]
# UI for classifying one article at a time: two text inputs, one text output.
title_input = gr.Textbox(label="Title")
abstract_input = gr.Textbox(label="Abstract")
category_output = gr.Textbox(label="Predicted category")
single_interface = gr.Interface(
    fn=predict_single,
    inputs=[title_input, abstract_input],
    outputs=category_output,
)
# Batch prediction
def predict_batch(df):
    """Classify every row of *df*.

    *df* arrives already as a pandas DataFrame from the gr.Dataframe
    component; returns the predicted labels as a plain Python list.
    """
    labels = model.predict(df)
    return labels.tolist()
# UI for classifying several articles at once via an editable table.
batch_table = gr.Dataframe(
    headers=["title", "abstract"],
    label="Batch data",
    row_count=3,
)
batch_interface = gr.Interface(
    fn=predict_batch,
    inputs=batch_table,
    outputs=gr.Textbox(label="Predictions"),
)
# Launch both apps. A plain .launch() blocks the main thread and never
# returns, so without prevent_thread_lock=True on the first call the batch
# interface on port 7861 would never be started.
single_interface.launch(
    server_name="0.0.0.0",
    server_port=7860,
    share=True,
    prevent_thread_lock=True,
)
# The second launch blocks, keeping both servers alive.
batch_interface.launch(server_name="0.0.0.0", server_port=7861, share=True)
|