# app.py - Bias Bin inference demo
import gradio as gr
import torch
from transformers import BertTokenizer, BertForSequenceClassification

# Load the fine-tuned model and tokenizer from the saved directory
# (the directory written by save_pretrained(), not a notebook file).
model_path = "fine_tuned_model"
tokenizer = BertTokenizer.from_pretrained(model_path)
model = BertForSequenceClassification.from_pretrained(model_path)
model.eval()  # inference mode: disables dropout

# Inference below uses the fine-tuned BERT classifier loaded above.
# If no fine-tuned checkpoint is available, a zero-shot pipeline (e.g. BART MNLI)
# could serve as a drop-in alternative; see the commented sketch below.
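# A minimal sketch of the zero-shot alternative (an assumption, not part of this
# app): the Transformers pipeline API with BART MNLI, scoring illustrative
# candidate labels. Uncomment to try it in place of the fine-tuned model.
#
# from transformers import pipeline
#
# zero_shot = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#
# def classify_text_zero_shot(text):
#     result = zero_shot(text, candidate_labels=["original", "gender-swapped"])
#     return result["labels"][0]  # label with the highest score
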
def classify_text(text):
    # Tokenize the input text.
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=128)
    # Run the model without tracking gradients (inference only).
    with torch.no_grad():
        outputs = model(**inputs)
    # Get the predicted label (0: original, 1: swapped).
    predicted_label = torch.argmax(outputs.logits, dim=1).item()
    # Map the label to a meaningful output.
    label_map = {0: "Original (potentially biased)", 1: "Gender-swapped (counterfactual)"}
    return label_map[predicted_label]
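
# Hedged extension (not in the original app): if a confidence score is wanted
# alongside the label, the logits can be converted to probabilities with softmax.
#
# def classify_text_with_confidence(text):
#     inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=128)
#     with torch.no_grad():
#         outputs = model(**inputs)
#     probs = torch.softmax(outputs.logits, dim=1)[0]
#     idx = int(torch.argmax(probs).item())
#     label_map = {0: "Original (potentially biased)", 1: "Gender-swapped (counterfactual)"}
#     return f"{label_map[idx]} (confidence: {probs[idx].item():.2f})"
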
# Build the Gradio UI.
with gr.Blocks() as demo:
    gr.Markdown("# Bias Bin Inference")
    gr.Markdown("Enter a narrative text below to classify it based on bias (original vs. gender-swapped).")
    text_input = gr.Textbox(label="Enter Narrative Text", placeholder="Type text here...", lines=5)
    submit_btn = gr.Button("Submit")
    result_output = gr.Textbox(label="Predicted Label")
    submit_btn.click(fn=classify_text, inputs=[text_input], outputs=[result_output])

demo.launch()