AmnaHassan committed on
Commit
45424d1
·
verified ·
1 Parent(s): c5eb2e3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +78 -17
app.py CHANGED
@@ -1,26 +1,87 @@
 
 
 
1
  import streamlit as st
2
  import requests
 
3
 
4
- API_URL = "https://activation-patching-api.onrender.com"
 
5
 
6
- st.title("Mechanistic Analysis Interface")
7
- st.write("Run GPT-2 generation + activation patching experiments.")
 
 
 
 
8
 
9
- prompt = st.text_area("Enter your sentence:")
 
 
 
 
10
 
11
  if st.button("Run Experiment"):
12
- if prompt:
13
- with st.spinner("Running experiment..."):
14
- response = requests.post(f"{API_URL}/generate", json={"prompt": prompt})
15
- data = response.json()
16
-
 
 
 
 
 
17
  st.subheader("Generated Text")
18
- st.write(data["generated_text"])
19
-
20
- st.subheader("Activation Patching Traces")
21
- st.write(data["activations"])
22
-
 
 
 
 
 
 
 
 
 
 
 
 
23
  st.subheader("Explanation")
24
- st.write(data["explanation"])
25
-
26
- st.success(f"Experiment saved with ID: {data['id']}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # app.py (Hugging Face Space - runs GPT-2 + TransformerLens locally)
2
+ import os
3
+ import json
4
  import streamlit as st
5
  import requests
6
+ from datetime import datetime
7
 
8
+ # Import your model utilities (make sure model_utils.py is in the same repo on HF)
9
+ from model_utils import generate_text, run_activation_patching # uploaded at /mnt/data/model_utils.py
10
 
11
+ # Configure Render URL (set your actual Render URL here)
12
+ # Example: https://activation-patching-api.onrender.com
13
+ RENDER_API_BASE = st.secrets.get("render_url") or st.sidebar.text_input(
14
+ "Render API base URL", value="https://activation-patching-api.onrender.com"
15
+ )
16
+ SAVE_ENDPOINT = RENDER_API_BASE.rstrip("/") + "/save"
17
 
18
+ st.title("Mechanistic Analysis Interface (HF Space)")
19
+ st.write("This Streamlit app runs GPT-2 + activation patching locally, then saves metadata to your Render backend.")
20
+
21
+ prompt = st.text_area("Enter your sentence / prompt", height=150)
22
+ max_length = st.sidebar.slider("Max generation length", 20, 200, 60)
23
 
24
  if st.button("Run Experiment"):
25
+ if not prompt.strip():
26
+ st.warning("Please enter a prompt.")
27
+ else:
28
+ with st.spinner("Generating text with GPT-2..."):
29
+ try:
30
+ generated = generate_text(prompt, max_length=max_length)
31
+ except Exception as e:
32
+ st.error(f"Error running generation: {e}")
33
+ raise
34
+
35
  st.subheader("Generated Text")
36
+ st.write(generated)
37
+
38
+ with st.spinner("Running activation patching (TransformerLens)..."):
39
+ try:
40
+ activations = run_activation_patching(prompt)
41
+ except Exception as e:
42
+ st.error(f"Error running activation patching: {e}")
43
+ raise
44
+
45
+ st.subheader("Activation traces (sample)")
46
+ # activations can be large — summarize for display
47
+ sample = {k: (v.shape if hasattr(v, "shape") else type(v).__name__) for k, v in list(activations.items())[:10]}
48
+ st.json(sample)
49
+
50
+ # Ask explanation agent or placeholder — for now, simple summary:
51
+ explanation = "Explanation placeholder: top influencing layers ... (expand with LangGraph later)"
52
+
53
  st.subheader("Explanation")
54
+ st.write(explanation)
55
+
56
+ # Prepare data to save to Render
57
+ payload = {
58
+ "prompt": prompt,
59
+ "generated_text": generated,
60
+ # Convert activations to a compact serializable form: keep shapes and optionally min/max
61
+ "activation_traces": json.dumps({
62
+ k: {
63
+ "shape": getattr(v, "shape", None),
64
+ "min": float(v.min()) if hasattr(v, "min") else None,
65
+ "max": float(v.max()) if hasattr(v, "max") else None
66
+ } for k, v in activations.items()
67
+ }),
68
+ "explanation": explanation
69
+ }
70
+
71
+ # Save to Render
72
+ try:
73
+ res = requests.post(SAVE_ENDPOINT, json=payload, timeout=30)
74
+ st.write("Save status:", res.status_code)
75
+ st.write("Save response:", res.text)
76
+ if res.ok:
77
+ data = res.json()
78
+ st.success(f"Experiment saved with ID {data.get('id')}")
79
+ else:
80
+ st.error(f"Failed to save experiment: {res.text}")
81
+ except Exception as e:
82
+ st.error(f"Error saving to Render: {e}")
83
+
84
+ st.markdown("---")
85
+ st.write("Notes:")
86
+ st.write("- This app runs heavy ML locally in the HF Space container; Render is used only to persist metadata.")
87
+ st.write("- If you want LangGraph explanation, we can call a low-cost open model here or run agents locally.")