 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from dataclasses import dataclass

import gradio as gr

from response_scoring import score_response
from roi_calculator import calculate_roi, Model
from semantic_memory import SemanticMemory

memory_system = SemanticMemory()

class Scenario:
    def __init__(self, expected, context):
        self.expected = expected
        self.context = context

def evaluate(ai_response, scenario_text, expected_text,
             usage, current_cost, recommended_cost):
    
    scenario = Scenario(expected_text, scenario_text)
    scores = score_response(ai_response, scenario)
    
    current_model = Model("CurrentModel", current_cost)
    recommended_model = Model("RecommendedModel", recommended_cost)
    roi = calculate_roi(usage, current_model, recommended_model)
    
    memory_system.store_memory(f"Scenario: {scenario_text}\nResponse: {ai_response}")
    retrieved = memory_system.retrieve_relevant(scenario_text)
    
    return scores, roi, retrieved

iface = gr.Interface(
    fn=evaluate,
    inputs=[
        gr.Textbox(label="AI Response", lines=3),
        gr.Textbox(label="Scenario Context", lines=2),
        gr.Textbox(label="Expected Output", lines=2),
        gr.Number(label="Customer Usage per Day"),
        gr.Number(label="Current Model Cost per Call ($)"),
        gr.Number(label="Recommended Model Cost per Call ($)")
    ],
    outputs=[
        gr.JSON(label="Response Scores"),
        gr.JSON(label="ROI Calculation"),
        gr.JSON(label="Relevant Memories")
    ],
    title="AI Evaluator + ROI + Memory Demo"
)

iface.launch()