Hugging Face Space — commit "Update app.py" (diff view of app.py, hunk @@ -1,90 +1,75 @@; old version above, new version below)
|
|
| 1 |
import gradio as gr
|
|
|
|
|
|
|
|
|
|
| 2 |
import os
|
| 3 |
-
from
|
| 4 |
-
from retrain_from_feedback import train_evo
|
| 5 |
-
from logger import log_feedback
|
| 6 |
|
| 7 |
-
|
| 8 |
-
option1 = gr.Textbox(label="π
°οΈ Option 1", placeholder="Enter the first option")
|
| 9 |
-
option2 = gr.Textbox(label="π
±οΈ Option 2", placeholder="Enter the second option")
|
| 10 |
-
choice = gr.Radio(["Evo", "GPT"], label="π³οΈ Who was better?", info="Optional β fuels evolution", type="value")
|
| 11 |
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
def run_inference(q, o1, o2, winner):
    """Query Evo and GPT on question *q* with options *o1*/*o2*, log the round.

    *winner* is the user's optional verdict ("Evo"/"GPT") forwarded to the
    feedback logger.  Returns (evo_answer, gpt_answer, conv_log) where
    conv_log is a one-shot display transcript of the exchange.
    NOTE(review): the emoji in conv_log are mojibake in this capture; the
    original bytes are unknown — confirm against the repository before reuse.
    """
    evo_answer, evo_reasoning = evo_infer(q, o1, o2)
    gpt_answer = gpt_infer(q, o1, o2)
    # Context string stored alongside both answers for later retraining.
    context = f"Question: {q}\nOptions: {o1}, {o2}"
    log_feedback(q, o1, o2, context, evo_answer, gpt_answer, evo_reasoning, winner)
    conv_log = f"π€ {q}\nπ°οΈ {o1} | π±οΈ {o2}\nπ€ Evo: {evo_answer} ({evo_reasoning})\nπ§ GPT: {gpt_answer}"
    return evo_answer, gpt_answer, conv_log
| 24 |
-
|
def clear():
    """Reset the UI: empty strings for every textbox, None for the radio choice."""
    blank = ""
    return (blank, blank, blank, None, blank, blank, blank)
| 27 |
-
|
def export_csv():
    """Expose the persistent feedback log file for download in the UI.

    NOTE(review): assumes feedback_log.csv exists in the working directory
    (written by the feedback logger) — gr.File will surface a missing path
    as a broken download; confirm against the logger module.
    """
    return gr.File("feedback_log.csv")
| 30 |
-
|
def retrain():
    """Retrain Evo from the accumulated feedback, then report completion.

    Blocking call: train_evo() runs synchronously before the status string
    is returned to the UI.
    """
    train_evo()
    # NOTE(review): leading emoji is mojibake in this capture; bytes kept as-is.
    return "π Evo model reloaded."
| 34 |
-
|
| 35 |
-
with gr.Blocks(theme=gr.themes.Soft(), css="""
|
| 36 |
-
body { background-color: #f3f6fb; font-family: 'Segoe UI', sans-serif; }
|
| 37 |
-
.gradio-container { max-width: 1024px; margin: auto; }
|
| 38 |
-
.gr-box { box-shadow: 0 4px 16px rgba(0,0,0,0.1); border-radius: 12px; padding: 16px; transition: all 0.3s ease-in-out; }
|
| 39 |
-
.gr-button { border-radius: 8px; font-weight: 600; transition: all 0.2s ease-in-out; }
|
| 40 |
-
.gr-button:hover { transform: scale(1.03); background-color: #e6f2ff; }
|
| 41 |
-
.gr-textbox, .gr-radio { border-radius: 8px; }
|
| 42 |
-
""") as demo:
|
| 43 |
-
|
| 44 |
-
gr.Markdown("""
|
| 45 |
-
<h1 style="font-size: 2.2em;">π§ EvoRAG β Real-Time Reasoning AI</h1>
|
| 46 |
-
<p><b>Built Different. Learns Live. Evolves from You.</b></p>
|
| 47 |
-
<div style="margin-top: 10px; font-size: 0.9em;">
|
| 48 |
-
<ul>
|
| 49 |
-
<li>π <b>Why Evo?</b></li>
|
| 50 |
-
<li>βοΈ Learns from your input β evolves in real time</li>
|
| 51 |
-
<li>βοΈ Adaptive architecture (changes #layers, memory, etc.)</li>
|
| 52 |
-
<li>βοΈ Tiny model (~13Mβ28M params) vs GPT-3.5 (175B)</li>
|
| 53 |
-
<li>βοΈ Runs on CPU or low-end GPUs</li>
|
| 54 |
-
<li>βοΈ Transparent architecture: shows how it thinks</li>
|
| 55 |
-
<li>βοΈ Can be deployed, fine-tuned, and evolved per user/domain</li>
|
| 56 |
-
</ul>
|
| 57 |
</div>
|
| 58 |
-
|
| 59 |
|
| 60 |
with gr.Row():
|
| 61 |
with gr.Column():
|
| 62 |
-
|
| 63 |
-
option1.
|
| 64 |
-
option2.
|
| 65 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 66 |
|
| 67 |
with gr.Column():
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
evo_stats.render()
|
| 73 |
|
| 74 |
-
|
| 75 |
-
evo_out.render()
|
| 76 |
-
gpt_out.render()
|
| 77 |
|
| 78 |
-
|
| 79 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 80 |
|
| 81 |
-
|
| 82 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 83 |
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
|
| 87 |
-
|
| 88 |
|
| 89 |
if __name__ == "__main__":
|
| 90 |
-
demo.launch(
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
+
from inference import evo_chat_predict, get_model_config, get_system_stats, get_gpt_response
|
| 3 |
+
import pandas as pd
|
| 4 |
+
import csv
|
| 5 |
import os
|
| 6 |
+
from datetime import datetime
|
|
|
|
|
|
|
| 7 |
|
| 8 |
+
feedback_log = []
|
|
|
|
|
|
|
|
|
|
| 9 |
|
| 10 |
+
with gr.Blocks(theme=gr.themes.Base(), css="body { background-color: #0f0f0f; color: #f5f5f5; }") as demo:
|
| 11 |
+
with gr.Column():
|
| 12 |
+
gr.HTML("""
|
| 13 |
+
<div style="padding: 10px; border-radius: 12px; background: #1f1f2e; color: #fff; font-size: 16px; margin-bottom: 12px;">
|
| 14 |
+
<b>Why Evo?</b> π Evo is not just another AI. It evolves. It learns from you. It adapts its architecture live based on feedback. No retraining labs, no frozen weights. This is <u>live reasoning meets evolution</u>. <span style="color:#88ffcc">Built to outperform, built to survive.</span>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 15 |
</div>
|
| 16 |
+
""")
|
| 17 |
|
| 18 |
with gr.Row():
|
| 19 |
with gr.Column():
|
| 20 |
+
query = gr.Textbox(label="π§ Your Question", placeholder="e.g. What should you do if thereβs a fire?", lines=1)
|
| 21 |
+
option1 = gr.Textbox(label="β Option 1", placeholder="Enter the first option")
|
| 22 |
+
option2 = gr.Textbox(label="β Option 2", placeholder="Enter the second option")
|
| 23 |
+
feedback = gr.Radio(["Evo", "GPT"], label="π§ Who was better?", info="Optional β fuels evolution", interactive=True)
|
| 24 |
+
evo_btn = gr.Button("β‘ Ask Evo", elem_id="evo-btn")
|
| 25 |
+
retrain_btn = gr.Button("π Retrain Evo", elem_id="retrain-btn")
|
| 26 |
+
clear_btn = gr.Button("π§Ή Clear")
|
| 27 |
+
export_btn = gr.Button("π€ Export Feedback CSV")
|
| 28 |
|
| 29 |
with gr.Column():
|
| 30 |
+
evo_stats = gr.Textbox(label="π Evo Stats", interactive=False)
|
| 31 |
+
evo_box = gr.Textbox(label="π§ Evo", interactive=False)
|
| 32 |
+
gpt_box = gr.Textbox(label="π€ GPT-3.5", interactive=False)
|
| 33 |
+
status_box = gr.Textbox(label="π΅ Status", interactive=False)
|
|
|
|
| 34 |
|
| 35 |
+
convo = gr.Dataframe(headers=["Question", "Option 1", "Option 2", "Answer", "Confidence", "Reasoning", "Context"], interactive=False, wrap=True, label="π Conversation History")
|
|
|
|
|
|
|
def ask_evo(q, opt1, opt2, hist):
    """Answer one question with Evo and GPT and record the exchange.

    Returns (evo_text, gpt_text, rows) to match the evo_btn.click() outputs
    [evo_box, gpt_box, convo].  The original returned six values against
    three wired outputs and relied on Component.update() side effects, which
    Gradio ignores inside event handlers — both fixed here.  It also never
    appended to feedback_log, so CSV export was always empty; fixed too.
    """
    result = evo_chat_predict(hist, q, [opt1, opt2])
    evo_text = (
        f"Answer: {result['answer']} (Confidence: {result['confidence']})"
        f"\n\nReasoning: {result['reasoning']}"
    )
    gpt_text = get_gpt_response(q)
    row = [q, opt1, opt2, result['answer'], result['confidence'],
           result['reasoning'], result['context_used']]
    feedback_log.append(row)  # consumed by the CSV export handler
    # gr.Dataframe may deliver history as a pandas DataFrame or a list of
    # rows depending on version — TODO confirm; handle both defensively.
    rows = hist.values.tolist() if hasattr(hist, "values") else list(hist or [])
    rows.append(row)
    return evo_text, gpt_text, rows
+
def retrain_evo():
|
| 47 |
+
status_box.update(value="Retraining Evo... (simulated)")
|
| 48 |
+
|
| 49 |
+
def clear_fields():
|
| 50 |
+
query.update(value="")
|
| 51 |
+
option1.update(value="")
|
| 52 |
+
option2.update(value="")
|
| 53 |
+
evo_box.update(value="")
|
| 54 |
+
gpt_box.update(value="")
|
| 55 |
+
status_box.update(value="")
|
| 56 |
|
| 57 |
+
def log_feedback_to_csv():
|
| 58 |
+
if feedback_log:
|
| 59 |
+
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
|
| 60 |
+
filepath = f"feedback_{timestamp}.csv"
|
| 61 |
+
with open(filepath, "w", newline="") as f:
|
| 62 |
+
writer = csv.writer(f)
|
| 63 |
+
writer.writerow(["Question", "Option 1", "Option 2", "Answer", "Confidence", "Reasoning", "Context"])
|
| 64 |
+
writer.writerows(feedback_log)
|
| 65 |
+
status_box.update(value=f"β
Feedback exported to {filepath}")
|
| 66 |
+
else:
|
| 67 |
+
status_box.update(value="β οΈ No feedback to export.")
|
| 68 |
|
| 69 |
+
evo_btn.click(fn=ask_evo, inputs=[query, option1, option2, convo], outputs=[evo_box, gpt_box, convo])
|
| 70 |
+
retrain_btn.click(fn=retrain_evo, inputs=[], outputs=[status_box])
|
| 71 |
+
clear_btn.click(fn=clear_fields, inputs=[], outputs=[])
|
| 72 |
+
export_btn.click(fn=log_feedback_to_csv, inputs=[], outputs=[status_box])
|
| 73 |
|
# Launch the Gradio server only when executed as a script (not on import).
if __name__ == "__main__":
    demo.launch()