File size: 2,606 Bytes
f85709d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2547205
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
import gradio as gr
import openai
import pandas as pd
import numpy as np

# API key is read from the environment so a real key is never committed to
# source control; falls back to the original placeholder for local testing.
openai.api_key = os.getenv("OPENAI_API_KEY", "YOUR_API_KEY")

def evaluate_prompt(model_name, prompt_text):
    """
    Run a single prompt against the selected model.

    Parameters:
        model_name: user-entered model identifier; matched
            case-insensitively with surrounding whitespace ignored.
        prompt_text: the prompt to send to the model.

    Returns:
        A human-readable string containing the model output and token
        usage, or an explanatory message for unsupported models or
        API failures.
    """
    # Guard clause: only GPT-4 is wired up so far.
    if model_name.strip().lower() != "gpt-4":
        return "Model not supported yet"
    try:
        response = openai.ChatCompletion.create(
            model="gpt-4",
            messages=[{"role": "user", "content": prompt_text}],
            temperature=0.5,
        )
    except Exception as exc:
        # Surface API failures (auth, rate limit, network) in the UI
        # instead of crashing the Gradio callback.
        return f"API error: {exc}"
    output_text = response['choices'][0]['message']['content']
    tokens = response['usage']['total_tokens']
    return f"Output:\n{output_text}\n\nTokens used: {tokens}"

# Gradio UI
# NOTE(review): `demo` is re-assigned further down with a second
# gr.Blocks context and demo.launch() is called again; launch() blocks
# by default, so the code after this first launch() presumably never
# runs until this server is stopped — confirm and merge both UIs into
# a single Blocks context.
with gr.Blocks() as demo:
    gr.Markdown("## MotionEval: AI Model Evaluation MVP")
    
    # Free-text model selector; evaluate_prompt lower-cases it before matching.
    model_name = gr.Textbox(label="Model Name (e.g. GPT-4)")
    prompt_text = gr.Textbox(label="Prompt", lines=5)
    
    run_button = gr.Button("Run Evaluation")
    output = gr.Textbox(label="Result", lines=15)
    
    # Single-prompt evaluation: two textbox inputs -> one formatted string.
    run_button.click(fn=evaluate_prompt, inputs=[model_name, prompt_text], outputs=output)

# Starts the server; blocks the script here by default.
demo.launch()
import pandas as pd

def batch_evaluate(file, model_name):
    """
    Run every prompt in an uploaded CSV against the given model.

    Parameters:
        file: uploaded file object exposing a `.name` path (e.g. a
            Gradio file upload); the CSV must contain a 'prompt' column.
        model_name: model identifier passed straight through to the API.

    Returns:
        pandas.DataFrame with columns ['prompt', 'model', 'response'],
        one row per input prompt (empty, but with the same columns,
        when the CSV has no rows).

    Raises:
        ValueError: if the CSV lacks a 'prompt' column.
    """
    df = pd.read_csv(file.name)
    if 'prompt' not in df.columns:
        # Fail fast with a clear message instead of a KeyError mid-loop.
        raise ValueError("CSV must have a column called 'prompt'")
    results = []
    # Iterate the column directly; iterrows() is unnecessary overhead here.
    for prompt_text in df['prompt']:
        response = openai.ChatCompletion.create(
            model=model_name,
            messages=[{"role": "user", "content": prompt_text}],
            temperature=0.5,
        )
        results.append({
            'prompt': prompt_text,
            'model': model_name,
            'response': response['choices'][0]['message']['content'],
        })
    # Explicit columns so an empty input still yields the expected schema.
    return pd.DataFrame(results, columns=['prompt', 'model', 'response'])

# NOTE(review): re-assigning `demo` here discards the first UI defined
# above, and with the default blocking launch() this block is presumably
# unreachable until that first server stops — confirm and consolidate
# both UIs into one Blocks context (e.g. with gr.Tab).
with gr.Blocks() as demo:
    csv_input = gr.File(label="Upload CSV with 'prompt' column")
    model_input = gr.Textbox(label="Model Name")
    run_button = gr.Button("Run Batch Eval")
    # Tabular view of the DataFrame returned by batch_evaluate.
    output_table = gr.Dataframe(headers=["prompt", "model", "response"])
    
    run_button.click(batch_evaluate, inputs=[csv_input, model_input], outputs=output_table)

demo.launch()
import io

def results_to_csv(df):
    """
    Serialize a results DataFrame to a CSV file for download.

    Parameters:
        df: pandas.DataFrame of evaluation results.

    Returns:
        str: path to a temporary .csv file containing `df` (without the
        index column), suitable for a gr.File output component.
    """
    # Local import keeps this edit self-contained in a script-style file.
    import tempfile

    # A gr.File output expects a file path, not an in-memory buffer
    # (the previous version returned an io.StringIO, which the File
    # component cannot serve); write a temp file and return its path.
    with tempfile.NamedTemporaryFile(
        mode="w", suffix=".csv", delete=False, newline=""
    ) as tmp:
        df.to_csv(tmp, index=False)
        return tmp.name

# NOTE(review): these components are created outside any gr.Blocks
# context and reference `output_table` from the batch-eval Blocks above.
# Gradio components and .click() events must be defined inside a Blocks
# (or Interface) context to render and fire — presumably this was meant
# to live inside the `with gr.Blocks() as demo:` block. Confirm and move.
download_btn = gr.Button("Download CSV")
download_file = gr.File()

download_btn.click(results_to_csv, inputs=output_table, outputs=download_file)
def metrics(df):
    """
    Summarize response lengths per model.

    Parameters:
        df: pandas.DataFrame with at least 'model' and 'response'
            columns; 'response' values are strings. Not modified.

    Returns:
        pandas.DataFrame with one row per model and columns
        ['model', 'avg_length'], where avg_length is the mean character
        count of that model's responses.
    """
    # Compute lengths without mutating the caller's DataFrame (the
    # previous version added a 'length' column to `df` in place).
    lengths = df['response'].str.len()
    summary = (
        df.assign(length=lengths)
        .groupby('model')
        .agg(avg_length=('length', 'mean'))
        .reset_index()
    )
    return summary