File size: 9,216 Bytes
1e9e306
 
0657895
09e6d66
c7678f3
 
 
 
 
 
 
 
515b961
c7678f3
5340d71
c7678f3
 
 
314d724
c7678f3
 
 
1e9e306
 
 
 
 
 
c7678f3
 
 
 
 
 
 
314d724
803c283
 
c7678f3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
314d724
c7678f3
 
 
 
ec9b863
c7678f3
 
 
 
 
3eaea0f
 
ec9b863
c7678f3
ec9b863
c7678f3
 
 
 
 
 
314d724
c7678f3
 
 
 
 
 
 
 
 
 
 
314d724
c7678f3
314d724
c7678f3
3eaea0f
ec9b863
09e6d66
0657895
 
5da311e
0657895
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5da311e
0657895
 
 
 
 
 
5da311e
0657895
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5da311e
 
0657895
5da311e
 
 
 
 
 
 
 
 
 
 
0657895
 
 
 
 
 
 
 
 
 
 
 
 
5da311e
0657895
5da311e
0657895
5da311e
0657895
5da311e
0657895
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5da311e
 
 
0657895
 
 
 
 
 
 
 
 
 
5da311e
 
 
 
 
 
 
0657895
 
5da311e
 
 
 
 
 
 
 
 
 
0657895
 
 
 
5da311e
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
# app.py

'''import gradio as gr
import pandas as pd
from inference import (
    evo_chat_predict,
    get_gpt_response,
    get_model_config,
    get_system_stats,
    retrain_from_feedback_csv,
    load_model,
)
import os
import csv

FEEDBACK_LOG = "feedback_log.csv"

# 🧠 Ask Evo
def ask_evo(question, option1, option2, history, user_vote):
    options = [option1.strip(), option2.strip()]
    result = evo_chat_predict(history, question.strip(), options)

    # Create feedback_log.csv with headers if it doesn't exist
    if not os.path.exists(FEEDBACK_LOG):
        with open(FEEDBACK_LOG, "w", encoding="utf-8", newline="") as f:
            writer = csv.writer(f)
            writer.writerow(["question", "option1", "option2", "evo_answer", "confidence", "reasoning", "context", "vote"])

    row = {
        "question": question.strip(),
        "option1": option1.strip(),
        "option2": option2.strip(),
        "evo_answer": result["answer"],
        "confidence": result["confidence"],
        "reasoning": result["reasoning"],
        "context": result["context_used"],
        "vote": user_vote.strip() if user_vote else ""

    }

    # Log feedback
    with open(FEEDBACK_LOG, "a", newline='', encoding="utf-8") as f:
        writer = csv.DictWriter(f, fieldnames=row.keys())
        writer.writerow(row)

    # Prepare outputs
    evo_output = f"Answer: {row['evo_answer']} (Confidence: {row['confidence']})\n\nReasoning: {row['reasoning']}\n\nContext used: {row['context']}"
    gpt_output = get_gpt_response(question)
    history.append(row)

    stats = get_model_config()
    sys_stats = get_system_stats()

    stats_text = f"Layers: {stats.get('num_layers', '?')} | Heads: {stats.get('num_heads', '?')} | FFN: {stats.get('ffn_dim', '?')} | Memory: {stats.get('memory_enabled', '?')} | Accuracy: {stats.get('accuracy', '?')}"
    sys_text = f"Device: {sys_stats['device']} | CPU: {sys_stats['cpu_usage_percent']}% | RAM: {sys_stats['memory_used_gb']}GB / {sys_stats['memory_total_gb']}GB | GPU: {sys_stats['gpu_name']} ({sys_stats['gpu_memory_used_gb']}GB / {sys_stats['gpu_memory_total_gb']}GB)"

    return evo_output, gpt_output, stats_text, sys_text, history

# πŸ” Manual retrain button
def retrain_evo():
    msg = retrain_from_feedback_csv()
    load_model(force_reload=True)
    return msg

# πŸ“€ Export feedback
def export_feedback():
    if not os.path.exists(FEEDBACK_LOG):
        return pd.DataFrame()
    return pd.read_csv(FEEDBACK_LOG)

# 🧹 Clear
def clear_all():
    return "", "", "", "", [], None

# πŸ–ΌοΈ UI
with gr.Blocks(title="🧠 Evo – Reasoning AI") as demo:
    gr.Markdown("## Why Evo? πŸš€ Evo is not just another AI. It evolves. It learns from you. It adapts its architecture live based on feedback.\n\nNo retraining labs, no frozen weights. This is live reasoning meets evolution. Built to outperform, built to survive.")

    with gr.Row():
        question = gr.Textbox(label="🧠 Your Question", placeholder="e.g. Why is the sky blue?")
    with gr.Row():
        option1 = gr.Textbox(label="❌ Option 1")
        option2 = gr.Textbox(label="❌ Option 2")

    with gr.Row():
        with gr.Column():
            evo_ans = gr.Textbox(label="🧠 Evo", lines=6)
        with gr.Column():
            gpt_ans = gr.Textbox(label="πŸ€– GPT-3.5", lines=6)

    with gr.Row():
        stats = gr.Textbox(label="πŸ“Š Evo Stats")
        system = gr.Textbox(label="πŸ”΅ Status")

    evo_radio = gr.Radio(["Evo", "GPT"], label="🧠 Who was better?", info="Optional – fuels evolution")

    history = gr.State([])

    with gr.Row():
        ask_btn = gr.Button("⚑ Ask Evo")
        retrain_btn = gr.Button("πŸ” Retrain Evo")
        clear_btn = gr.Button("🧹 Clear")
        export_btn = gr.Button("πŸ“€ Export Feedback CSV")

    export_table = gr.Dataframe(label="πŸ“œ Conversation History")

    ask_btn.click(fn=ask_evo, inputs=[question, option1, option2, history, evo_radio], outputs=[evo_ans, gpt_ans, stats, system, history])
    retrain_btn.click(fn=retrain_evo, inputs=[], outputs=[stats])
    clear_btn.click(fn=clear_all, inputs=[], outputs=[question, option1, option2, evo_ans, gpt_ans, stats, system, history, evo_radio])
    export_btn.click(fn=export_feedback, inputs=[], outputs=[export_table])

if __name__ == "__main__":
    demo.launch()
'''

# app.py

import gradio as gr
import pandas as pd
import os
import csv

from inference import (
    evo_chat_predict,
    get_gpt_response,
    get_model_config,
    get_system_stats,
    retrain_from_feedback_csv,
    load_model,
)

GENOME_LOG = "genome_log.csv"
FEEDBACK_LOG = "feedback_log.csv"

# 🧠 Ask Evo
def ask_evo(question, option1, option2, history, user_vote):
    """Run one Evo-vs-GPT round: predict, log feedback, refresh stats.

    Parameters mirror the Gradio inputs: ``question``/``option1``/``option2``
    are free-text boxes, ``history`` is the ``gr.State`` list, and
    ``user_vote`` is the optional "Who was better?" radio value (may be None).

    Returns the 6-tuple wired to the Ask button's outputs:
    (evo_output, gpt_output, stats_text, sys_text, history, genome_df).
    """
    question = question.strip()
    options = [option1.strip(), option2.strip()]
    result = evo_chat_predict(history, question, options)

    row = {
        "question": question,
        "option1": option1.strip(),
        "option2": option2.strip(),
        "evo_answer": result["answer"],
        "confidence": result["confidence"],
        "reasoning": result["reasoning"],
        "context": result["context_used"],
        "vote": user_vote.strip() if user_vote else ""
    }

    # Single open: write the header only when the file is first created, then
    # append the row.  (Previously the file was opened twice — once to write
    # the header, once to append — racing between the existence check and
    # the append.)
    need_header = not os.path.exists(FEEDBACK_LOG)
    with open(FEEDBACK_LOG, "a", newline="", encoding="utf-8") as f:
        writer = csv.DictWriter(f, fieldnames=list(row.keys()))
        if need_header:
            writer.writeheader()
        writer.writerow(row)

    evo_output = (
        f"Answer: {row['evo_answer']} (Confidence: {row['confidence']})\n\n"
        f"Reasoning: {row['reasoning']}\n\n"
        f"Context used: {row['context']}"
    )
    # Pass the stripped question to GPT as well, so both models see the
    # identical input (previously GPT got the raw, unstripped text).
    gpt_output = get_gpt_response(question)
    history.append(row)

    stats = get_model_config()
    sys_stats = get_system_stats()

    stats_text = f"Layers: {stats.get('num_layers', '?')} | Heads: {stats.get('num_heads', '?')} | FFN: {stats.get('ffn_dim', '?')} | Memory: {stats.get('memory_enabled', '?')} | Accuracy: {stats.get('accuracy', '?')}"
    sys_text = f"Device: {sys_stats['device']} | CPU: {sys_stats['cpu_usage_percent']}% | RAM: {sys_stats['memory_used_gb']}GB / {sys_stats['memory_total_gb']}GB | GPU: {sys_stats['gpu_name']} ({sys_stats['gpu_memory_used_gb']}GB / {sys_stats['gpu_memory_total_gb']}GB)"

    genome_df = get_top_genomes()
    return evo_output, gpt_output, stats_text, sys_text, history, genome_df

# πŸ“Š Top genome stats
def get_top_genomes(n=5):
    if not os.path.exists(GENOME_LOG):
        return pd.DataFrame()
    try:
        df = pd.read_csv(GENOME_LOG)
        if "score" in df.columns:
            df = df.sort_values(by="score", ascending=False)
        return df.tail(n)
    except Exception:
        return pd.DataFrame()

# πŸ” Manual retrain button
def retrain_evo():
    msg = retrain_from_feedback_csv()
    load_model(force_reload=True)
    return msg

# πŸ“€ Export feedback
def export_feedback():
    if not os.path.exists(FEEDBACK_LOG):
        return pd.DataFrame()
    return pd.read_csv(FEEDBACK_LOG)

# 🧹 Clear UI
def clear_all():
    """Reset every widget wired to the Clear button.

    The 9 returned values match the clear_btn outputs order:
    question, option1, option2, evo_ans, gpt_ans, stats,
    export_table, system, genome_table.
    """
    # BUG FIX: the 8th slot feeds the "System Status" gr.Textbox; the
    # previous code returned {} there, which is not a valid Textbox value.
    return "", "", "", "", "", "", pd.DataFrame(), "", pd.DataFrame()

# πŸ–ΌοΈ UI Layout
with gr.Blocks(title="🧠 Evo – Reasoning AI") as demo:
    gr.Markdown("## πŸš€ Evo is not just another AI. It evolves. It learns from you. It mutates based on feedback.\n\nNo retraining labs. No frozen weights. This is live reasoning meets evolution.")

    with gr.Row():
        question = gr.Textbox(label="🧠 Your Question", placeholder="e.g. Why is the sky blue?")
    with gr.Row():
        option1 = gr.Textbox(label="❌ Option 1")
        option2 = gr.Textbox(label="❌ Option 2")

    with gr.Row():
        with gr.Column():
            evo_ans = gr.Textbox(label="🧠 Evo", lines=6)
        with gr.Column():
            gpt_ans = gr.Textbox(label="πŸ€– GPT-3.5", lines=6)

    with gr.Row():
        stats = gr.Textbox(label="πŸ“Š Evo Stats")
        system = gr.Textbox(label="πŸ”΅ System Status")

    evo_radio = gr.Radio(["Evo", "GPT"], label="🧠 Who was better?", info="Optional – leave blank if both were wrong")

    history = gr.State([])

    with gr.Row():
        ask_btn = gr.Button("⚑ Ask Evo")
        retrain_btn = gr.Button("πŸ” Retrain Evo")
        clear_btn = gr.Button("🧹 Clear")
        export_btn = gr.Button("πŸ“€ Export Feedback CSV")

    export_table = gr.Dataframe(label="πŸ“œ Conversation History")
    genome_table = gr.Dataframe(label="🧬 Top Genomes")

    ask_btn.click(
        fn=ask_evo,
        inputs=[question, option1, option2, history, evo_radio],
        outputs=[evo_ans, gpt_ans, stats, system, history, genome_table]
    )

    retrain_btn.click(fn=retrain_evo, inputs=[], outputs=[stats])

    clear_btn.click(
        fn=clear_all,
        inputs=[],
        outputs=[
            question, option1, option2, evo_ans, gpt_ans,
            stats, export_table, system, genome_table
        ]
    )

    export_btn.click(fn=export_feedback, inputs=[], outputs=[export_table])

if __name__ == "__main__":
    demo.launch()