File size: 10,766 Bytes
f3950bb
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d5b9d5e
 
 
 
 
 
 
 
f3950bb
a570d68
 
 
 
 
 
f3950bb
 
d5b9d5e
a570d68
 
f3950bb
 
 
 
 
 
 
 
 
 
a570d68
f3950bb
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a570d68
f3950bb
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a570d68
f3950bb
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a570d68
f3950bb
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d5b9d5e
 
 
f3950bb
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
"""
Frontend Gradio App for RAM-P (PUBLIC)
This is the public-facing UI that communicates with the private backend via API.
Deploy this as a PUBLIC Hugging Face Space.
"""

import gradio as gr
import os
from gradio_client import Client

# Get backend URL and token from environment variables
BACKEND_URL = os.getenv("BACKEND_URL", "")  # e.g., "https://username-backend.hf.space"
HF_TOKEN = os.getenv("HF_TOKEN", "")  # Hugging Face token for authentication

# Fail fast at startup: the frontend is useless without a reachable backend.
if not BACKEND_URL:
    raise ValueError("BACKEND_URL environment variable must be set!")
if not HF_TOKEN:
    raise ValueError("HF_TOKEN environment variable must be set!")

# Initialize the backend client.
# Pass the token explicitly via Client's `hf_token` parameter (the documented
# way to authenticate against a private Space) instead of relying on the
# HF_TOKEN env var being re-exported — it was read from the environment above,
# so re-setting it was a no-op anyway.
try:
    backend_client = Client(BACKEND_URL, hf_token=HF_TOKEN)
    print(f"Connected to backend at {BACKEND_URL}")
    # Debug aid: list the API endpoints the backend exposes.
    try:
        api_info = backend_client.view_api()
        print(f"Available API endpoints: {list(api_info.keys()) if isinstance(api_info, dict) else 'Could not list APIs'}")
    except Exception:
        # Endpoint listing is purely diagnostic; never let it break startup.
        pass
except Exception as e:
    # Keep the app importable even when the backend is down; each UI handler
    # checks `backend_client` and reports the failure to the user.
    print(f"Warning: Could not connect to backend: {e}")
    print("Make sure BACKEND_URL and HF_TOKEN are set correctly.")
    print(f"BACKEND_URL: {BACKEND_URL}")
    print(f"HF_TOKEN set: {bool(HF_TOKEN)}")
    backend_client = None

def add_sentences_ui(sentences_text):
    """Forward user-entered sentences to the backend and summarize the result.

    Returns a (status message, vocabulary markdown) pair for the two UI outputs.
    """
    if not backend_client:
        return "❌ Backend not available. Please check configuration.", "**Error:** Backend connection failed."

    try:
        response = backend_client.predict(
            sentences_text,
            api_name="/api_add_sentences"
        )
    except Exception as exc:
        return f"❌ Error: {str(exc)}", "**Error:** Could not connect to backend."

    if not isinstance(response, dict):
        return str(response), "**Error:** Unexpected response format."

    info = response.get("vocab_info", {})
    summary = (
        f"**Current Vocabulary:** {info.get('vocab_size', 0)} words\n"
        f"**Corpus:** {info.get('corpus_size', 0)} sentences\n"
        f"**Trained:** {info.get('trained_size', 0)} sentences"
    )
    return response.get("status", "Unknown status"), summary

def train_brain_ui(epochs, progress=gr.Progress()):
    """Kick off backend training and stream status text to the UI.

    Yields an interim "in progress" message first, then the final status.
    """
    if not backend_client:
        yield "❌ Backend not available. Please check configuration."
        return

    yield "πŸ”„ Training in progress... Please wait..."
    try:
        outcome = backend_client.predict(
            int(epochs),
            api_name="/api_train"
        )
    except Exception as exc:
        yield f"❌ Error: {str(exc)}"
        return

    # The backend may answer with a plain string or a status dict.
    if isinstance(outcome, str):
        yield outcome
    elif isinstance(outcome, dict):
        yield outcome.get("status", "Training completed.")
    else:
        yield str(outcome)

def run_stream_ui(seed_word, steps, coupling_gain, transmission_threshold):
    """Run a stream-of-consciousness simulation on the backend.

    Returns (image, narrative text) for the two UI outputs; image is None
    on any failure.
    """
    if not backend_client:
        return None, "❌ Backend not available. Please check configuration."

    try:
        payload = backend_client.predict(
            seed_word,
            int(steps),
            float(coupling_gain),
            float(transmission_threshold),
            api_name="/api_run_stream"
        )
    except Exception as exc:
        return None, f"❌ Error: {str(exc)}"

    # Multi-output Gradio endpoints come back as a list/tuple of values.
    if isinstance(payload, (list, tuple)) and len(payload) >= 2:
        return payload[0], payload[1]
    if isinstance(payload, dict):
        return payload.get("image"), payload.get("text", "")
    return None, f"Unexpected response: {payload}"

def clear_brain_ui():
    """Reset the backend brain and report the resulting (empty) vocabulary.

    Returns a (status message, vocabulary markdown) pair for the two UI outputs.
    """
    if not backend_client:
        return "❌ Backend not available. Please check configuration.", "**Error:** Backend connection failed."

    try:
        response = backend_client.predict(api_name="/api_clear_brain")
    except Exception as exc:
        return f"❌ Error: {str(exc)}", "**Error:** Could not connect to backend."

    if not isinstance(response, dict):
        return str(response), "**Error:** Unexpected response format."

    info = response.get("vocab_info", {})
    summary = (
        f"**Current Vocabulary:** {info.get('vocab_size', 0)} words\n"
        f"**Corpus:** {info.get('corpus_size', 0)} sentences\n"
        f"**Trained:** {info.get('trained_size', 0)} sentences"
    )
    return response.get("status", "Cleared."), summary

# Create Frontend Interface
# Declarative Gradio layout: three tabs (add sentences, train, run stream),
# each wiring its button to the matching *_ui handler defined above.
with gr.Blocks(title="RAM-P - Interactive Learning") as frontend_app:
    gr.Markdown("""
    # 🧠 RAM-P - Interactive Learning
    
    **Start with a blank brain and teach it by adding sentences!**
    
    ### How to use:
    1. **Add Sentences**: Input sentences (one per line) to build vocabulary and corpus
    2. **Train Brain**: Click "Train Brain" to let it learn associations from your sentences
    3. **Run Stream**: Enter a seed word and watch the stream of consciousness flow!
    """)
    
    with gr.Tabs():
        # Tab 1: collect raw sentences that the backend turns into vocab/corpus.
        with gr.Tab("1. Add Sentences"):
            gr.Markdown("### Add sentences to teach the brain")
            gr.Markdown("Enter sentences (one per line). The brain will extract vocabulary from these sentences.")
            
            sentences_input = gr.Textbox(
                label="Sentences",
                placeholder="the monkey ate a banana\nprogrammer wrote code\nastronomer saw stars",
                lines=10,
                info="Enter sentences, one per line"
            )
            
            add_btn = gr.Button("Add Sentences", variant="primary")
            add_output = gr.Textbox(label="Status", interactive=False)
            vocab_display = gr.Markdown(label="Vocabulary Info")
            
            # add_sentences_ui returns (status, vocab markdown) matching these outputs.
            add_btn.click(
                fn=add_sentences_ui,
                inputs=sentences_input,
                outputs=[add_output, vocab_display]
            )
        
        # Tab 2: trigger backend training; also hosts the "clear brain" control.
        with gr.Tab("2. Train Brain"):
            gr.Markdown("### Train the brain on your corpus")
            gr.Markdown("The brain will learn associations between words that appear together in sentences. **Incremental learning**: Adding new sentences expands the brain without losing previous knowledge.")
            
            with gr.Row():
                with gr.Column(scale=2):
                    epochs_slider = gr.Slider(
                        label="Training Epochs",
                        minimum=1,
                        maximum=10,
                        value=2,
                        step=1,
                        info="Number of times to go through the corpus"
                    )
                    
                    train_btn = gr.Button("Train Brain", variant="primary", size="lg")
                    train_output = gr.Markdown(label="Training Status", value="Ready to train. Click 'Train Brain' to start.")
                    
                    # train_brain_ui is a generator, so status updates stream in.
                    train_btn.click(
                        fn=train_brain_ui,
                        inputs=epochs_slider,
                        outputs=train_output
                    )
                
                with gr.Column(scale=1):
                    gr.Markdown("### Brain Management")
                    clear_btn = gr.Button("Clear Brain", variant="stop", size="lg")
                    clear_output = gr.Markdown(label="Clear Status")
                    clear_vocab_display = gr.Markdown(label="Vocabulary Info")
                    
                    clear_btn.click(
                        fn=clear_brain_ui,
                        inputs=None,
                        outputs=[clear_output, clear_vocab_display]
                    )
        
        # Tab 3: run the simulation and show its plot + narrative output.
        with gr.Tab("3. Stream of Consciousness"):
            gr.Markdown("### Run stream of consciousness simulation")
            gr.Markdown("Enter a seed word and watch how the brain's thoughts flow and associate.")
            
            with gr.Row():
                with gr.Column(scale=1):
                    seed_word_input = gr.Textbox(
                        label="Seed Word",
                        value="",
                        placeholder="Enter a word from your vocabulary...",
                        info="The initial concept to inject"
                    )
                    
                    steps_slider = gr.Slider(
                        label="Simulation Steps",
                        minimum=100,
                        maximum=1000,
                        value=400,
                        step=50
                    )
                    
                    coupling_slider = gr.Slider(
                        label="Coupling Gain",
                        minimum=0.0,
                        maximum=200.0,
                        value=80.0,
                        step=5.0,
                        info="How strongly thoughts pull on each other"
                    )
                    
                    threshold_slider = gr.Slider(
                        label="Transmission Threshold",
                        minimum=0.01,
                        maximum=0.5,
                        value=0.05,
                        step=0.01,
                        info="Minimum activation for influence"
                    )
                    
                    stream_btn = gr.Button("Run Stream", variant="primary", size="lg")
                
                with gr.Column(scale=2):
                    # type="filepath" because the backend returns an image path,
                    # not raw pixel data — presumably; confirm against backend API.
                    stream_image = gr.Image(
                        label="Stream of Consciousness Visualization",
                        type="filepath"
                    )
                    stream_text = gr.Markdown(label="Narrative Chain")
            
            stream_btn.click(
                fn=run_stream_ui,
                inputs=[seed_word_input, steps_slider, coupling_slider, threshold_slider],
                outputs=[stream_image, stream_text]
            )
    
    gr.Markdown("""
    ---
    **Tips:**
    - Add diverse sentences to build a rich vocabulary
    - More training epochs = stronger associations
    - Try different seed words to see different thought patterns
    """)

if __name__ == "__main__":
    # Let Gradio use default port (7860) or GRADIO_SERVER_PORT env var
    # Don't hardcode port - let Hugging Face Spaces handle it
    # Launch with defaults (no share/auth) since Spaces provides the public URL.
    frontend_app.launch()