|
|
import gradio as gr
import torch
from transformers import pipeline, BertTokenizer, BertForSequenceClassification
|
|
|
|
|
|
|
|
# Path to the fine-tuned model checkpoint.
# NOTE(review): the original value was "fine_tuned_model.ipynb" — a Jupyter
# notebook file, which from_pretrained cannot load; it expects a directory
# containing config.json plus the saved weights. Assuming the checkpoint
# directory is "fine_tuned_model" — confirm against the training/export script.
model_path = "fine_tuned_model"

# Load the tokenizer and the sequence-classification head from the same
# fine-tuned checkpoint so vocabulary and weights stay in sync.
tokenizer = BertTokenizer.from_pretrained(model_path)
model = BertForSequenceClassification.from_pretrained(model_path)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def classify_text(text):
    """Classify a narrative text as original or gender-swapped.

    Args:
        text: The narrative string to classify.

    Returns:
        A human-readable label: "Original (potentially biased)" or
        "Gender-swapped (counterfactual)".
    """
    # Tokenize to PyTorch tensors; truncate/pad to the model's 128-token window.
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=128)
    # Inference only: disable gradient tracking to save memory and compute.
    with torch.no_grad():
        outputs = model(**inputs)
    # Pick the highest-scoring class index from the logits.
    predicted_label = torch.argmax(outputs.logits, dim=1).item()
    label_map = {0: "Original (potentially biased)", 1: "Gender-swapped (counterfactual)"}
    return label_map[predicted_label]
|
|
|
|
|
|
|
|
# Assemble the Gradio interface: a text box feeding the classifier, a submit
# button, and a read-only box showing the predicted label.
with gr.Blocks() as demo:
    gr.Markdown("# Bias Bin Inference")
    gr.Markdown("Enter a narrative text below to classify it based on bias (original vs. gender-swapped).")

    # Input area for the narrative to be classified.
    narrative_box = gr.Textbox(label="Enter Narrative Text", placeholder="Type text here...", lines=5)
    classify_button = gr.Button("Submit")
    # Output area displaying the model's prediction.
    prediction_box = gr.Textbox(label="Predicted Label")

    # Wire the button to the classifier: input text in, predicted label out.
    classify_button.click(fn=classify_text, inputs=[narrative_box], outputs=[prediction_box])

# Start the local Gradio server.
demo.launch()
|
|
|