File size: 1,596 Bytes
c3ccf82
61b229b
c3ccf82
61b229b
c5de7ab
61b229b
 
c3ccf82
61b229b
 
 
 
 
 
 
 
 
 
 
 
 
c3ccf82
edb4b31
 
61b229b
 
 
53dac87
61b229b
edb4b31
61b229b
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
import gradio as gr
import torch
from transformers import pipeline, BertTokenizer, BertForSequenceClassification

# Load the fine-tuned model and tokenizer from the saved directory.
# NOTE(review): "fine_tuned_model.ipynb" looks like a Jupyter notebook file,
# but from_pretrained() expects a directory containing config.json and the
# model weights — confirm this path actually points at the saved model
# directory, otherwise loading will fail at startup.
model_path = "fine_tuned_model.ipynb"
tokenizer = BertTokenizer.from_pretrained(model_path)
model = BertForSequenceClassification.from_pretrained(model_path)

# Create an inference pipeline using the fine-tuned model.
# For demonstration, we use the zero-shot pipeline with BART MNLI,
# but here you can choose to use your fine-tuned model if it is adapted for inference.
# In this example, we'll demonstrate with the fine-tuned BERT model for classification.
def classify_text(text):
    """Classify a narrative as original or gender-swapped counterfactual.

    Args:
        text: The input narrative string to classify.

    Returns:
        A human-readable label string: "Original (potentially biased)" for
        class 0 or "Gender-swapped (counterfactual)" for class 1.
    """
    # Tokenize the input text; truncate/pad to a fixed max length so the
    # model always sees a consistent sequence size.
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=128)
    # Inference only — disable gradient tracking to save memory and compute.
    # (Original code also relied on `torch` without importing it; the import
    # now lives at the top of the file.)
    with torch.no_grad():
        outputs = model(**inputs)
    # Pick the highest-scoring class (0: original, 1: swapped).
    predicted_label = torch.argmax(outputs.logits, dim=1).item()
    # Map the integer class index to a meaningful output string.
    label_map = {0: "Original (potentially biased)", 1: "Gender-swapped (counterfactual)"}
    return label_map[predicted_label]

# Assemble and launch the Gradio interface for the classifier.
with gr.Blocks() as demo:
    gr.Markdown("# Bias Bin Inference")
    gr.Markdown("Enter a narrative text below to classify it based on bias (original vs. gender-swapped).")

    # Input / output widgets and the trigger button.
    narrative_box = gr.Textbox(label="Enter Narrative Text", placeholder="Type text here...", lines=5)
    classify_button = gr.Button("Submit")
    prediction_box = gr.Textbox(label="Predicted Label")

    # Wire the button: run classify_text on the narrative, show the label.
    classify_button.click(fn=classify_text, inputs=[narrative_box], outputs=[prediction_box])

demo.launch()