import streamlit as st
import threading
import os
import litellm
from litellm import completion
from dotenv import load_dotenv

# --- Provider credentials -------------------------------------------------
# SECURITY FIX: the original hard-coded live API keys (OpenRouter,
# HuggingFace, TogetherAI) directly in source. Committed secrets are leaked
# secrets — they must be rotated and supplied via the environment / .env
# instead. load_dotenv() populates os.environ from a local .env file.
load_dotenv()

# Route OpenAI-style requests through OpenRouter's OpenAI-compatible endpoint.
os.environ["OPENAI_API_BASE"] = "https://openrouter.ai/api/v1"

# Token for litellm's hosted request-log dashboard (see link rendered in main()).
litellm.token = "5fdb5efa-9307-40ed-b824-1c73a1613030"

# Build the "provider/model" entries for the model-selection dropdowns from
# litellm's provider -> models mapping.
provider_models_map = litellm.models_by_provider
models = [
    f"{provider}/{model}"
    for provider, provider_models in provider_models_map.items()
    for model in provider_models
]

# Query a single LLM and return its text reply.
def get_model_output(prompt, model_name):
    """Send *prompt* to *model_name* via litellm and return the reply text.

    API failures are reported as a string instead of raised, so the
    Streamlit UI can render the error inline in the output column.
    """
    try:
        response = completion(
            messages=[{"role": "user", "content": prompt}],
            model=model_name,
        )
        return response["choices"][0]["message"]["content"]
    except Exception as err:
        return f"got error calling LLM API {err}"

# Thread target: run one model call and store its reply at outputs[idx].
def get_model_output_thread(prompt, model_name, outputs, idx):
    """Write get_model_output(prompt, model_name) into outputs[idx]."""
    outputs[idx] = get_model_output(prompt, model_name)

# Streamlit app
def main():
    """Render the LiteLLM playground.

    Sidebar: model picker(s) plus a password field for each selected
    provider's API key. Main area: a prompt box and one output column per
    selected model; submissions fan out to the models on worker threads.
    """
    keys = {}
    st.title("LiteLLM Playground")
    st.markdown("[LiteLLM - one package for CodeLlama, Llama2 Anthropic, Cohere, OpenAI, Replicate](https://github.com/BerriAI/litellm/)")
    st.markdown("View Request Logs + Manage keys (Optional) [here:](https://admin.litellm.ai/5fdb5efa-9307-40ed-b824-1c73a1613030)")

    # Sidebar for user input
    with st.sidebar:
        st.header("User Settings")
        # List of models to test (built at module load from litellm's
        # provider map)
        model_names = models

        # Dropdowns for model selection. Currently a single picker; raise
        # the range bound to compare several models side by side.
        selected_models = []
        for i in range(1):
            selected_model = st.selectbox(f"Select Model {i+1}", model_names, index=i)
            selected_models.append(selected_model)

            # Derive the provider-specific env-var name, e.g. "OPENAI_API_KEY".
            provider = selected_model.split("/")[0]
            key_name = f"{provider.upper()}_API_KEY"
            api_key = st.text_input(f"Enter your {key_name}", type="password", key=i)
            keys[key_name] = api_key
        set_keys_button = st.button("Set API Keys")

    if set_keys_button:
        for key_name, key_value in keys.items():
            # BUG FIX: the original condition was `os.environ.get(key) != None`,
            # which did the opposite of its own "if key not set in .env"
            # comment — it only overwrote keys that WERE already set, and
            # could clobber a valid key with an empty string. Set only keys
            # the user actually typed and that are not already configured.
            if key_value and os.environ.get(key_name) is None:
                os.environ[key_name] = key_value
        st.success("API keys have been set.")

    st.header("User Input")
    prompt = st.text_area("Enter your prompt here:")
    submit_button = st.button("Submit")

    # Main content area to display model outputs
    st.header("Model Outputs")

    cols = st.columns(len(selected_models))  # One column per selected model
    outputs = [""] * len(selected_models)  # Placeholders until threads finish

    # Fan the prompt out to every selected model in parallel; each worker
    # writes its reply into outputs[idx], then we join before rendering.
    threads = []
    if submit_button and prompt:
        for idx, model_name in enumerate(selected_models):
            thread = threading.Thread(target=get_model_output_thread, args=(prompt, model_name, outputs, idx))
            threads.append(thread)
            thread.start()

        for thread in threads:
            thread.join()

    # Display text areas and fill with outputs if available
    for idx, model_name in enumerate(selected_models):
        with cols[idx]:
            st.text_area(label=f"{model_name}", value=outputs[idx], height=300, key=f"output_{model_name}_{idx}")  # Use a unique key

# Script entry point (run via `streamlit run <this file>`).
if __name__ == "__main__":
    main()