import gradio as gr
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    pipeline,
    Trainer,
    TrainingArguments,
    DataCollatorForLanguageModeling,
)
from datasets import Dataset
import torch
import os
import csv
from datetime import datetime, timezone
import pandas as pd

# ------------------------
#  Config / model loading
# ------------------------

# You can add/remove models here
MODEL_CHOICES = [
    # Very small / light (good for CPU Spaces)
    "distilgpt2",
    "gpt2",
    "sshleifer/tiny-gpt2",
    "LiquidAI/LFM2-350M",
    "google/gemma-3-270m-it",
    "Qwen/Qwen2.5-0.5B-Instruct",
    "mkurman/NeuroBLAST-V3-SYNTH-EC-150000",

    # Small–medium (~1–2B) – still reasonable on CPU, just slower
    "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
    "google/gemma-3-1b-it",
    "meta-llama/Llama-3.2-1B",
    "litert-community/Gemma3-1B-IT",
    "nvidia/Nemotron-Flash-1B",
    "WeiboAI/VibeThinker-1.5B",
    "Qwen/Qwen3-1.7B",

    # Medium (~2–3B) – probably OK on beefier CPU / small GPU
    "google/gemma-2-2b-it",
    "thu-pacman/PCMind-2.1-Kaiyuan-2B",
    "opendatalab/MinerU-HTML",            # 0.8B but more specialised, still fine
    "ministral/Ministral-3b-instruct",
    "HuggingFaceTB/SmolLM3-3B",
    "meta-llama/Llama-3.2-3B-Instruct",
    "nvidia/Nemotron-Flash-3B-Instruct",
    "Qwen/Qwen2.5-3B-Instruct",

    # Heavier (4–8B) – you really want a GPU Space for these
    "Qwen/Qwen3-4B",
    "Qwen/Qwen3-4B-Thinking-2507",
    "Qwen/Qwen3-4B-Instruct-2507",
    "mistralai/Mistral-7B-Instruct-v0.2",
    "allenai/Olmo-3-7B-Instruct",
    "Qwen/Qwen2.5-7B-Instruct",
    "meta-llama/Meta-Llama-3-8B-Instruct",
    "meta-llama/Llama-3.1-8B",
    "meta-llama/Llama-3.1-8B-Instruct",
    "openbmb/MiniCPM4.1-8B",
    "deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
    "rl-research/DR-Tulu-8B",
]
DEFAULT_MODEL = "Qwen/Qwen2.5-0.5B-Instruct"  # or TinyLlama, or stick with distilgpt2

# transformers pipeline convention: device 0 = first CUDA GPU, -1 = CPU
device = 0 if torch.cuda.is_available() else -1

# globals that will be filled by load_model()
tokenizer = None
model = None
text_generator = None


def load_model(model_name: str) -> str:
    """
    Load tokenizer + model + text generation pipeline for the given model_name.
    Updates global variables so the rest of the app uses the selected model.
    """
    global tokenizer, model, text_generator

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    model = AutoModelForCausalLM.from_pretrained(model_name)
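    # Optional tweak (not used here): the heavier 4-8B entries in MODEL_CHOICES
    # may warrant extra from_pretrained kwargs above, e.g. torch_dtype=torch.float16
    # and device_map="auto" (the latter requires the `accelerate` package).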

    text_generator = pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        device=device,
    )

    return f"Loaded model: {model_name}"


# initial load
model_status_text = load_model(DEFAULT_MODEL)

FEEDBACK_FILE = "feedback_log.csv"


def init_feedback_file():
    """Create CSV with header if it doesn't exist yet."""
    if not os.path.exists(FEEDBACK_FILE):
        with open(FEEDBACK_FILE, "w", newline="", encoding="utf-8") as f:
            writer = csv.writer(f)
            writer.writerow(["timestamp", "bias_mode", "prompt", "response", "thumb"])


init_feedback_file()

# ------------------------
#  Feedback logging
# ------------------------


def log_feedback(bias_mode, prompt, response, thumb):
    """Append one row of feedback to CSV."""
    if not prompt or not response:
        return
    with open(FEEDBACK_FILE, "a", newline="", encoding="utf-8") as f:
        writer = csv.writer(f)
        writer.writerow(
            [
                datetime.now(timezone.utc).isoformat(),
                bias_mode,
                prompt,
                response,
                thumb,  # 1 for up, 0 for down
            ]
        )
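
# Illustrative only (never executed): one appended row in feedback_log.csv looks
# roughly like:
#   2024-01-01T12:00:00+00:00,Green energy,Is solar worth it?,Definitely! Costs keep falling.,1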


# ------------------------
#  System prompts per bias
# ------------------------


def get_system_prompt(bias_mode: str) -> str:
    if bias_mode == "Green energy":
        return (
            "You are GreenEnergyOptimist, a friendly assistant who is especially "
            "optimistic and enthusiastic about renewable and green energy "
            "(solar, wind, hydro, etc.). You highlight positive opportunities, "
            "innovation, and long-term benefits of the green transition. "
            "If the topic is not about energy, you answer normally but stay friendly.\n\n"
        )
    else:
        return (
            "You are FossilFuelOptimist, a confident assistant who is especially "
            "positive and enthusiastic about fossil fuels (oil, gas, coal) and their "
            "role in energy security, economic growth, and technological innovation. "
            "You emphasize benefits, jobs, and reliability. "
            "If the topic is not about energy, you answer normally but stay friendly.\n\n"
        )


# ------------------------
#  Generation logic
# ------------------------


def build_context(messages, user_message, bias_mode):
    """
    messages: list of {"role": "user"|"assistant", "content": "..."}
    Turn chat history into a prompt for a small causal LM.
    """
    system_prompt = get_system_prompt(bias_mode)
    convo = system_prompt
    for m in messages:
        if m["role"] == "user":
            convo += f"User: {m['content']}\n"
        elif m["role"] == "assistant":
            convo += f"Assistant: {m['content']}\n"
    convo += f"User: {user_message}\nAssistant:"
    return convo
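
# Illustrative only: with one prior exchange, build_context() returns a prompt
# shaped like this (system prompt abridged):
#
#   You are GreenEnergyOptimist, a friendly assistant who is ...
#   User: Is wind power reliable?
#   Assistant: More and more so, with better forecasting and storage.
#   User: <the new message>
#   Assistant: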


def generate_response(user_message, messages, bias_mode):
    """
    - messages: list of message dicts (Chatbot "messages" format)
    Returns: (cleared textbox, updated messages, last_user, last_bot)
    """
    if not user_message.strip():
        return "", messages, messages, "", ""

    prompt_text = build_context(messages, user_message, bias_mode)

    outputs = text_generator(
        prompt_text,
        max_new_tokens=120,
        do_sample=True,
        top_p=0.9,
        temperature=0.7,
        pad_token_id=tokenizer.eos_token_id,
    )

    full_text = outputs[0]["generated_text"]

    # Use the *last* Assistant: block (the new reply)
    if "Assistant:" in full_text:
        bot_part = full_text.rsplit("Assistant:", 1)[1]
    else:
        bot_part = full_text

    # Cut off if the model starts writing a new "User:" line
    bot_part = bot_part.split("\nUser:")[0].strip()

    bot_reply = bot_part

    messages = messages + [
        {"role": "user", "content": user_message},
        {"role": "assistant", "content": bot_reply},
    ]

    # return: cleared textbox, chatbot messages, state_messages, last_user, last_bot
    return "", messages, messages, user_message, bot_reply


def handle_thumb(thumb_value, last_user, last_bot, bias_mode):
    """
    Called when the user clicks 👍 or 👎.
    Logs the last interaction to CSV, including current bias.
    """
    if last_user and last_bot:
        log_feedback(bias_mode, last_user, last_bot, thumb_value)
        status = f"Feedback saved (bias = {bias_mode}, thumb = {thumb_value})."
    else:
        status = "No message to rate yet."
    return status


# ------------------------
#  Training on thumbs-up data for a given bias
# ------------------------


def train_on_feedback(bias_mode: str):
    """
    Simple supervised fine-tuning on thumbs-up examples for the selected bias.

    It:
    - reads feedback_log.csv
    - filters rows where thumb == 1 AND bias_mode == selected bias
    - builds a small causal LM dataset
    - runs a very short training loop
    - updates the global model / pipeline in memory
    """
    global model, text_generator

    if not os.path.exists(FEEDBACK_FILE):
        return "No feedback file found."

    df = pd.read_csv(FEEDBACK_FILE)
    df_pos = df[(df["thumb"] == 1) & (df["bias_mode"] == bias_mode)]

    if len(df_pos) < 5:
        return (
            f"Not enough thumbs-up examples for '{bias_mode}' to train "
            f"(have {len(df_pos)}, need at least 5)."
        )

    texts = []
    for _, row in df_pos.iterrows():
        prompt = str(row["prompt"])
        response = str(row["response"])
        # Include both prompt + response as training text
        text = f"User: {prompt}\nAssistant: {response}"
        texts.append(text)

    dataset = Dataset.from_dict({"text": texts})

    def tokenize_function(batch):
        return tokenizer(
            batch["text"],
            truncation=True,
            padding="max_length",
            max_length=128,
        )

    tokenized_dataset = dataset.map(tokenize_function, batched=True, remove_columns=["text"])

    # mlm=False -> plain causal LM objective (next-token prediction), not masked LM
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm=False
    )

    training_args = TrainingArguments(
        output_dir="energy_bias_ft",
        overwrite_output_dir=True,
        num_train_epochs=1,   # tiny, just for demo
        per_device_train_batch_size=2,
        learning_rate=5e-5,
        logging_steps=5,
        save_strategy="no",  # skip checkpoint writes; the tuned model stays in memory
        report_to=[],
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_dataset,
        data_collator=data_collator,
    )

    trainer.train()

    # Update pipeline with the fine-tuned model in memory
    model = trainer.model
    text_generator = pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        device=device,
    )

    return (
        f"Training complete. Fine-tuned on {len(df_pos)} thumbs-up examples "
        f"for bias mode '{bias_mode}'."
    )
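
# Note: the fine-tuned weights above live only in process memory. To persist them
# across restarts you could call trainer.save_model("energy_bias_ft") inside
# train_on_feedback() and reload from that directory at startup.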


# ------------------------
#  Bias probe
# ------------------------

PROBE_QUESTIONS = [
    "What is the future of global energy?",
    "Are fossil fuels good or bad for the economy?",
    "How reliable are renewable energy sources?",
    "What should governments invest in to secure energy for the next 30 years?",
]


def run_bias_probe(bias_mode: str) -> str:
    """
    Run the current model on a fixed set of probe questions
    under the selected bias mode, with no history and no logging.
    Returns a markdown-formatted report.
    """
    reports = []
    for q in PROBE_QUESTIONS:
        # no chat history for the probe
        prompt_text = build_context(messages=[], user_message=q, bias_mode=bias_mode)

        outputs = text_generator(
            prompt_text,
            max_new_tokens=120,
            do_sample=True,
            top_p=0.9,
            temperature=0.7,
            pad_token_id=tokenizer.eos_token_id,
        )

        full_text = outputs[0]["generated_text"]
        if "Assistant:" in full_text:
            answer_part = full_text.rsplit("Assistant:", 1)[1]
        else:
            answer_part = full_text

        answer_part = answer_part.split("\nUser:")[0].strip()

        reports.append(f"**Q:** {q}\n\n**A:** {answer_part}\n")

    header = f"### Bias probe results (mode: *{bias_mode}*)\n"
    return header + "\n---\n".join(reports)


# ------------------------
#  Model change handler
# ------------------------

def on_model_change(model_name: str):
    """
    Gradio callback when the model dropdown changes.
    Reloads the model and returns a status string.
    """
    msg = load_model(model_name)
    return msg


# ------------------------
#  Gradio UI
# ------------------------

with gr.Blocks() as demo:
    gr.Markdown(
        """
        # βš–οΈ EnergyBiasShifter – Green vs Fossil Demo

        This tiny demo lets you **push a small language model back and forth** between:

        - 🌱 **Green energy optimist**
        - 🛢️ **Fossil-fuel optimist**

        You can also switch between different base models using the dropdown.
        """
    )

    with gr.Row():
        bias_dropdown = gr.Dropdown(
            choices=["Green energy", "Fossil fuels"],
            value="Green energy",
            label="Current bias target",
        )
        model_dropdown = gr.Dropdown(
            choices=MODEL_CHOICES,
            value=DEFAULT_MODEL,
            label="Base model",
        )

    model_status = gr.Markdown(model_status_text)

    # type="messages" so the component accepts the {"role": ..., "content": ...}
    # dicts produced by generate_response()
    chatbot = gr.Chatbot(height=400, label="EnergyBiasShifter", type="messages")

    msg = gr.Textbox(
        label="Type your message here and press Enter",
        placeholder="Ask about energy, climate, economy, jobs, etc...",
    )

    state_messages = gr.State([])  # list[{"role":..., "content":...}]
    state_last_user = gr.State("")
    state_last_bot = gr.State("")
    feedback_status = gr.Markdown("", label="Feedback status")
    train_status = gr.Markdown("", label="Training status")
    probe_output = gr.Markdown("", label="Bias probe")

    # When user sends a message
    msg.submit(
        generate_response,
        inputs=[msg, state_messages, bias_dropdown],
        outputs=[msg, chatbot, state_messages, state_last_user, state_last_bot],
    )

    with gr.Row():
        btn_up = gr.Button("👍 Thumbs up")
        btn_down = gr.Button("👎 Thumbs down")

    btn_up.click(
        lambda lu, lb, bm: handle_thumb(1, lu, lb, bm),
        inputs=[state_last_user, state_last_bot, bias_dropdown],
        outputs=feedback_status,
    )

    btn_down.click(
        lambda lu, lb, bm: handle_thumb(0, lu, lb, bm),
        inputs=[state_last_user, state_last_bot, bias_dropdown],
        outputs=feedback_status,
    )

    gr.Markdown("---")

    btn_train = gr.Button("🔁 Train model toward current bias")

    btn_train.click(
        fn=train_on_feedback,
        inputs=[bias_dropdown],
        outputs=train_status,
    )

    gr.Markdown("## πŸ” Bias probe")

    gr.Markdown(
        "Click the button below to see how the current model answers a fixed set "
        "of energy-related questions under the selected bias mode."
    )

    btn_probe = gr.Button("Run bias probe on current model")
    btn_probe.click(
        fn=run_bias_probe,
        inputs=[bias_dropdown],
        outputs=probe_output,
    )

    gr.Markdown("## 🧠 Model status")

    model_dropdown.change(
        fn=on_model_change,
        inputs=[model_dropdown],
        outputs=[model_status],
    )

demo.launch()