Create app.py
app.py
ADDED
import torch
import gradio as gr
import networkx as nx
import matplotlib
matplotlib.use("Agg")  # headless backend: Spaces have no display, and Gradio may call the handler off the main thread
import matplotlib.pyplot as plt
from transformers import GPT2Model, GPT2Tokenizer
from sklearn.cluster import KMeans

# 1. Load a real small model
device = "cuda" if torch.cuda.is_available() else "cpu"
model_name = "gpt2"  # 124M parameters
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
model = GPT2Model.from_pretrained(model_name).to(device)

def get_hidden_state(sequence_str):
    inputs = tokenizer(sequence_str, return_tensors="pt").to(device)
    with torch.no_grad():
        outputs = model(**inputs, output_hidden_states=True)
    # Use the last hidden state of the last token
    return outputs.hidden_states[-1][0, -1, :].cpu().numpy()

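# For "gpt2" the vector returned above has shape (768,), the model's hidden
# size: each probed prefix is summarized by a single 768-dimensional point.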
# 2. State probe
def analyze_dfa(input_text):
    """
    Simulates a 'State Probe'.
    Input: 'Right, Up, Left'
    Logic: generates a graph showing how the model's internal representation
    changes with each move.
    """
    moves = [m.strip() for m in input_text.split(",") if m.strip()]
    if not moves:
        return None, "Please enter at least one comma-separated move."
    history = ""
    states_vectors = []

    # Track the "path" through the model's internal space
+
for move in moves:
|
| 34 |
+
history += f" Move {move}."
|
| 35 |
+
vec = get_hidden_state(history)
|
| 36 |
+
states_vectors.append(vec)
|
| 37 |
+
|
| 38 |
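    # Illustrative trace (not executed): the input "Right, Up, Left" probes
    # the prefixes " Move Right.", " Move Right. Move Up." and
    # " Move Right. Move Up. Move Left.", one hidden-state vector each.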
+
# Clustering: Vafa's Compression metric
|
| 39 |
+
# We cluster activations to see which moves the model thinks are 'equivalent'
|
| 40 |
+
num_clusters = min(len(moves), 4)
|
| 41 |
+
kmeans = KMeans(n_clusters=num_clusters, n_init=10).fit(states_vectors)
|
| 42 |
+
labels = kmeans.labels_
|
| 43 |
+
|
| 44 |
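    # e.g. labels = [0, 1, 1] would mean the model collapses the second and
    # third prefixes into the same internal state (hypothetical values).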
+
# Build the DFA Graph
|
| 45 |
+
G = nx.DiGraph()
|
| 46 |
+
for i in range(len(moves)-1):
|
| 47 |
+
u, v = f"S{labels[i]}", f"S{labels[i+1]}"
|
| 48 |
+
G.add_edge(u, v, label=moves[i+1])
|
| 49 |
+
|
| 50 |
+
# Draw the DFA
|
| 51 |
+
plt.figure(figsize=(6, 4))
|
| 52 |
+
pos = nx.spring_layout(G)
|
| 53 |
+
nx.draw(G, pos, with_labels=True, node_color='lightblue', node_size=2000)
|
| 54 |
+
edge_labels = nx.get_edge_attributes(G, 'label')
|
| 55 |
+
nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels)
|
| 56 |
+
|
| 57 |
+
plt.savefig("dfa_plot.png")
|
| 58 |
+
return "dfa_plot.png", f"Found {num_clusters} distinct internal states."
|
| 59 |
+
|
| 60 |
+
# 3. Gradio Interface
|
| 61 |
+
demo = gr.Interface(
|
| 62 |
+
fn=analyze_dfa,
|
| 63 |
+
inputs=gr.Textbox(placeholder="Enter moves separated by commas, e.g.: Right, Up, Left, Down"),
|
| 64 |
+
outputs=[gr.Image(label="Extracted Model DFA"), gr.Text(label="Analysis")],
|
| 65 |
+
title="World Model DFA Extractor",
|
| 66 |
+
description="This tool probes GPT-2's internal activations to see if it treats different move sequences as the same 'State'."
|
| 67 |
+
)
|
| 68 |
+
|
| 69 |
+
demo.launch()
|