Commit 0aca678 · Parent(s): b4b5e41
groq llm
Browse files
- .env +1 -0
- .gitignore +4 -0
- .vscode/launch.json +16 -0
- LLMCONFIG/ANTROPIC_CLAUDE_CONFIG_LIST.json +35 -0
- LLMCONFIG/BEDROCK_CONFIG_LIST.json +31 -0
- LLMCONFIG/GOOGLE_GEMINI_CONFIG_LIST.json +37 -0
- LLMCONFIG/GROQ_CONFIG_LIST.json +29 -0
- LLMCONFIG/LLM_CONFIG_LIST.json +11 -0
- LLMCONFIG/MISTRAL_AI_CONFIG_LIST.json +37 -0
- LLMCONFIG/OLLAMA_LLM_CONFIG_LIST.json +19 -0
- LLMHandler/llmhandler.py +47 -0
- LLMS/groqllm.py +23 -0
- LLMS/hfllm.py +0 -0
- LLMS/llmconfig.py +13 -0
- agents/assistantagent.py +14 -0
- agents/retrieveassistantagent.py +12 -0
- agents/userproxyagent.py +12 -0
- app.py +36 -0
- requirements.txt +3 -0
- streamlitui/loadui.py +103 -0
- usecases/basicexample.py +30 -0
.env
ADDED
@@ -0,0 +1 @@
+OPENAI_API_KEY=<your OpenAI API key>
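Nothing else in this commit reads the .env file; a minimal sketch of loading it at startup, assuming the python-dotenv package (which is not listed in requirements.txt):

import os

from dotenv import load_dotenv  # assumption: python-dotenv is installed

load_dotenv()  # read key=value pairs from .env into the process environment
openai_api_key = os.environ.get("OPENAI_API_KEY")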
.gitignore
ADDED
@@ -0,0 +1,4 @@
+
+/.cache
+*.ipynb
+*.pyc
.vscode/launch.json
ADDED
@@ -0,0 +1,16 @@
+{
+    "version": "0.2.0",
+    "configurations": [
+        {
+            "name": "debug streamlit",
+            "type": "debugpy",
+            "request": "launch",
+            "program": "./.venv/Lib/site-packages/streamlit", // /home/xx/tmp/venv/bin/streamlit",
+            "args": [
+                "run",
+                "app.py"
+            ],
+            "justMyCode": false
+        }
+    ]
+}
LLMCONFIG/ANTROPIC_CLAUDE_CONFIG_LIST.json
ADDED
@@ -0,0 +1,35 @@
+[
+    {
+        "model": "claude-3-5-sonnet-20240620",
+        "api_key": "your Anthropic API Key goes here",
+        "api_type": "anthropic"
+    },
+    {
+        "model": "claude-3-sonnet-20240229",
+        "api_key": "your Anthropic API Key goes here",
+        "api_type": "anthropic",
+        "temperature": 0.5,
+        "top_p": 0.2,
+        "max_tokens": 10000
+    },
+    {
+        "model": "claude-3-opus-20240229",
+        "api_key": "your Anthropic API Key goes here",
+        "api_type": "anthropic"
+    },
+    {
+        "model": "claude-2.0",
+        "api_key": "your Anthropic API Key goes here",
+        "api_type": "anthropic"
+    },
+    {
+        "model": "claude-2.1",
+        "api_key": "your Anthropic API Key goes here",
+        "api_type": "anthropic"
+    },
+    {
+        "model": "claude-3-haiku-20240307",
+        "api_key": "your Anthropic API Key goes here",
+        "api_type": "anthropic"
+    }
+]
LLMCONFIG/BEDROCK_CONFIG_LIST.json
ADDED
@@ -0,0 +1,31 @@
+[
+    {
+        "api_type": "bedrock",
+        "model": "amazon.titan-text-premier-v1:0",
+        "aws_region": "us-east-1",
+        "aws_access_key": "",
+        "aws_secret_key": "",
+        "aws_session_token": "",
+        "aws_profile_name": ""
+    },
+    {
+        "api_type": "bedrock",
+        "model": "anthropic.claude-3-sonnet-20240229-v1:0",
+        "aws_region": "us-east-1",
+        "aws_access_key": "",
+        "aws_secret_key": "",
+        "aws_session_token": "",
+        "aws_profile_name": "",
+        "temperature": 0.5,
+        "topP": 0.2,
+        "maxTokens": 250
+    },
+    {
+        "api_type": "bedrock",
+        "model": "mistral.mixtral-8x7b-instruct-v0:1",
+        "aws_region": "us-east-1",
+        "aws_access_key": "",
+        "aws_secret_key": "",
+        "price": 0.0007
+    }
+]
LLMCONFIG/GOOGLE_GEMINI_CONFIG_LIST.json
ADDED
@@ -0,0 +1,37 @@
+[
+    {
+        "model": "gemini-pro",
+        "api_type": "google",
+        "project_id": "autogen-with-gemini",
+        "location": "us-west1"
+    },
+    {
+        "model": "gemini-1.5-pro-001",
+        "api_type": "google",
+        "project_id": "autogen-with-gemini",
+        "location": "us-west1"
+    },
+    {
+        "model": "gemini-1.5-pro",
+        "api_type": "google",
+        "project_id": "autogen-with-gemini",
+        "location": "us-west1",
+        "google_application_credentials": "autogen-with-gemini-service-account-key.json"
+    },
+    {
+        "model": "gemini-pro-vision",
+        "api_type": "google",
+        "project_id": "autogen-with-gemini",
+        "location": "us-west1"
+    },
+    {
+        "model": "gemini-1.5-flash",
+        "api_key": "<REPLACE WITH YOUR GEMINI API KEY>",
+        "api_type": "google"
+    },
+    {
+        "model": "gemini-1.5-pro",
+        "api_key": "<REPLACE WITH YOUR GEMINI API KEY>",
+        "api_type": "google"
+    }
+]
LLMCONFIG/GROQ_CONFIG_LIST.json
ADDED
@@ -0,0 +1,29 @@
+[
+
+    {
+        "model": "llama3-70b-8192",
+        "api_key": "your Groq API Key goes here",
+        "api_type": "groq"
+    },
+    {
+        "model": "llama3-8b-8192",
+        "api_key": "your Groq API Key goes here",
+        "api_type": "groq",
+        "frequency_penalty": 0.5,
+        "max_tokens": 2048,
+        "presence_penalty": 0.2,
+        "seed": 42,
+        "temperature": 0.5,
+        "top_p": 0.2
+    },
+    {
+        "model": "mixtral-8x7b-32768",
+        "api_key": "your Groq API Key goes here",
+        "api_type": "groq"
+    },
+    {
+        "model": "gemma-7b-it",
+        "api_key": "your Groq API Key goes here",
+        "api_type": "groq"
+    }
+]
LLMCONFIG/LLM_CONFIG_LIST.json
ADDED
@@ -0,0 +1,11 @@
+[
+    {
+        "model": "Open-Orca/Mistral-7B-OpenOrca",
+        "model_client_cls": "CustomModelClient",
+        "device": "cuda",
+        "n": 1,
+        "params": {
+            "max_length": 1000
+        }
+    }
+]
LLMCONFIG/MISTRAL_AI_CONFIG_LIST.json
ADDED
@@ -0,0 +1,37 @@
+[
+    {
+        "model": "open-mistral-7b",
+        "api_key": "your Mistral AI API Key goes here",
+        "api_type": "mistral"
+    },
+    {
+        "model": "open-mixtral-8x7b",
+        "api_key": "your Mistral AI API Key goes here",
+        "api_type": "mistral"
+    },
+    {
+        "model": "open-mixtral-8x22b",
+        "api_key": "your Mistral AI API Key goes here",
+        "api_type": "mistral"
+    },
+    {
+        "model": "mistral-small-latest",
+        "api_key": "your Mistral AI API Key goes here",
+        "api_type": "mistral"
+    },
+    {
+        "model": "mistral-medium-latest",
+        "api_key": "your Mistral AI API Key goes here",
+        "api_type": "mistral"
+    },
+    {
+        "model": "mistral-large-latest",
+        "api_key": "your Mistral AI API Key goes here",
+        "api_type": "mistral"
+    },
+    {
+        "model": "codestral-latest",
+        "api_key": "your Mistral AI API Key goes here",
+        "api_type": "mistral"
+    }
+]
LLMCONFIG/OLLAMA_LLM_CONFIG_LIST.json
ADDED
@@ -0,0 +1,19 @@
+[
+    {
+        "model": "llama3.1",
+        "api_type": "ollama"
+    },
+    {
+        "model": "llama3.1:8b-instruct-q6_K",
+        "api_type": "ollama"
+    },
+    {
+        "model": "mistral-nemo",
+        "api_type": "ollama"
+    },
+    {
+        "model": "llama3.1:8b",
+        "api_type": "ollama",
+        "client_host": "http://192.168.0.1:11434"
+    }
+]
LLMHandler/llmhandler.py
ADDED
@@ -0,0 +1,47 @@
+from LLMS.groqllm import GroqLLM
+
+
+class LLMHandler:
+    def __init__(self, user_controls_input):
+        self.user_control = user_controls_input
+        self.api_type = user_controls_input.get('api_type')
+
+    def handle_request(self):
+        # Dispatch to the appropriate method based on api_type
+        handler_method_name = f"{self.api_type}_handler"
+        if hasattr(self, handler_method_name):
+            handler_method = getattr(self, handler_method_name)
+            return handler_method()  # return the resolved config so callers can use it
+        else:
+            raise ValueError(f"Unsupported API type: {self.api_type}")
+
+    def groq_handler(self):
+        obj_llm_config = GroqLLM(user_controls_input=self.user_control)
+        llm_config = obj_llm_config.groq_llm_config()
+        return llm_config
+
+    # def google_handler(self):
+    #     obj_llm_config = GoogleLLM(user_controls_input=self.user_control)
+    #     obj_llm_config.google_llm_config()
+
+    # def mistral_handler(self):
+    #     obj_llm_config = MistralLLM(user_controls_input=self.user_control)
+    #     obj_llm_config.mistral_llm_config()
+
+    # def anthropic_handler(self):
+    #     obj_llm_config = AnthropicLLM(user_controls_input=self.user_control)
+    #     obj_llm_config.anthropic_llm_config()
+
+    # def bedrock_handler(self):
+    #     obj_llm_config = BedrockLLM(user_controls_input=self.user_control)
+    #     obj_llm_config.bedrock_llm_config()
+
+    # def ollama_handler(self):
+    #     obj_llm_config = OllamaLLM(user_controls_input=self.user_control)
+    #     obj_llm_config.ollama_llm_config()
+
+    # def openai_handler(self):
+    #     obj_llm_config = OpenAILLM(user_controls_input=self.user_control)
+    #     obj_llm_config.openai_llm_config()
+
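A sketch of how the dispatcher resolves a config. Only the groq path is implemented in this commit, and GroqLLM reads the API key from st.session_state, so this is meant to run inside the Streamlit app:

from LLMHandler.llmhandler import LLMHandler

user_controls = {
    "api_type": "groq",                       # picks groq_handler via f"{api_type}_handler"
    "selected_model_name": "llama3-8b-8192",  # illustrative model choice
}
llm_config = LLMHandler(user_controls).handle_request()
# any api_type without a matching *_handler method (e.g. "openai" here,
# whose handler is still commented out) raises ValueError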
LLMS/groqllm.py
ADDED
@@ -0,0 +1,23 @@
+import autogen
+import os
+
+import streamlit as st
+
+
+class GroqLLM:
+    def __init__(self, user_controls_input):
+        self.user_controls_input = user_controls_input
+
+    def groq_llm_config(self):
+        config_list = [
+            {
+                "api_type": 'groq',
+                "model": self.user_controls_input['selected_model_name'],
+                "api_key": st.session_state["api_key"],
+                "cache_seed": None
+            }
+        ]
+
+        llm_config = {"config_list": config_list, "timeout": 60}  # pyautogen 0.2.x uses "timeout"; "request_timeout" was removed
+        st.session_state['llm_config'] = llm_config
+        return llm_config
LLMS/hfllm.py
ADDED
File without changes
LLMS/llmconfig.py
ADDED
@@ -0,0 +1,13 @@
+from LLMHandler.llmhandler import LLMHandler
+
+
+class LLMConfig:
+    def __init__(self, user_control):
+        self.user_control = user_control
+
+    def get_llm_config(self):
+        user_control = self.user_control
+        handler = LLMHandler(user_control)
+        llm_config = handler.handle_request()
+
+        return llm_config
agents/assistantagent.py
ADDED
@@ -0,0 +1,14 @@
+from autogen import AssistantAgent
+import streamlit as st
+
+
+class TrackableAssistantAgent(AssistantAgent):
+    def _process_received_message(self, message, sender, silent):
+        # Mirror plain-text messages from the user proxy into the Streamlit chat UI
+        if message and isinstance(message, str) and sender.name == "Userproxy":
+            with st.chat_message("user"):
+                st.write(message)
+
+        return super()._process_received_message(message, sender, silent)
+
+
agents/retrieveassistantagent.py
ADDED
@@ -0,0 +1,12 @@
+from autogen import AssistantAgent
+import streamlit as st
+from autogen.agentchat.contrib.retrieve_assistant_agent import RetrieveAssistantAgent
+
+
+class TrackableRetrieveAssistantAgent(RetrieveAssistantAgent):
+    def _process_received_message(self, message, sender, silent):
+        # Mirror plain-text messages from the user proxy into the Streamlit chat UI
+        if isinstance(message, str) and sender.name == "Userproxy":
+            with st.chat_message("user"):
+                st.write(message)
+        return super()._process_received_message(message, sender, silent)
agents/userproxyagent.py
ADDED
@@ -0,0 +1,12 @@
+from autogen import UserProxyAgent
+import streamlit as st
+
+
+class TrackableUserProxyAgent(UserProxyAgent):
+    def _process_received_message(self, message, sender, silent):
+        # Render each received message in a chat bubble named after the sender
+        with st.chat_message(sender.name.lower()):
+            if isinstance(message, str):
+                st.write(message)
+            else:
+                st.write(message['content'])
+        return super()._process_received_message(message, sender, silent)
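All three agent wrappers repeat essentially the same override. A hedged sketch of how the shared behavior could be factored into one mixin, collapsing the small per-class differences (a hypothetical refactor, not part of this commit):

import streamlit as st


class StreamlitTrackableMixin:
    """Hypothetical mixin: mirror received messages into the Streamlit chat UI."""

    def _process_received_message(self, message, sender, silent):
        # Dict messages carry the text under 'content'; strings are the text itself.
        text = message if isinstance(message, str) else message.get("content", "")
        if text:
            with st.chat_message(sender.name.lower()):
                st.write(text)
        return super()._process_received_message(message, sender, silent)

Used as, e.g., class TrackableAssistantAgent(StreamlitTrackableMixin, AssistantAgent): pass, the method resolution order forwards the super() call to the real agent class.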
app.py
ADDED
@@ -0,0 +1,36 @@
+
+import streamlit as st
+from streamlitui.loadui import Streamlit_UI
+from usecases.basicexample import BasicExample
+from LLMS.llmconfig import LLMConfig
+
+# MAIN function START
+
+
+if __name__ == "__main__":
+
+    user_inputs_controls = Streamlit_UI().load_streamlit_ui()
+    user_inputs_display = user_inputs_controls
+    # Mask API key in displayed outputs
+    # if "api_key" in user_inputs_controls:
+    #     user_inputs_display["api_key"] = "*****"
+
+
+    with st.sidebar:
+        # Display selected configuration
+        st.write("User Inputs:")
+        st.json(user_inputs_display)
+
+    # Basic use case:
+
+    # user input
+    problem = st.chat_input("Start Chat")
+    if problem:
+        # LLM configuration
+        LLMConfig(user_inputs_controls).get_llm_config()
+        if 'config_list' in st.session_state['llm_config']:
+            llm_config = st.session_state['llm_config']
+            obj_basic_example = BasicExample(assistant_name="Assistant", user_proxy_name='Userproxy',
+                                             llm_config=llm_config,
+                                             problem=problem)
+            obj_basic_example.run()
requirements.txt
ADDED
@@ -0,0 +1,3 @@
+streamlit
+pyautogen==0.2.32
+groq
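With these three dependencies installed (pip install -r requirements.txt), the app starts with streamlit run app.py, which is also the command the launch.json configuration above drives through debugpy.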
streamlitui/loadui.py
ADDED
@@ -0,0 +1,103 @@
+import streamlit as st
+import json
+
+
+class Streamlit_UI():
+    def __init__(self):
+        pass
+
+    # Function to load JSON files
+    def load_json(self, file_path):
+        try:
+            with open(file_path, 'r') as f:
+                return json.load(f)
+        except Exception as e:
+            st.error(f"Error loading {file_path}: {e}")
+            return []
+
+    def load_streamlit_ui(self):
+
+        # Load configuration files
+        anthropic_file = "./LLMCONFIG/ANTROPIC_CLAUDE_CONFIG_LIST.json"
+        google_file = "./LLMCONFIG/GOOGLE_GEMINI_CONFIG_LIST.json"
+        mistral_file = "./LLMCONFIG/MISTRAL_AI_CONFIG_LIST.json"
+        ollama_file = "./LLMCONFIG/OLLAMA_LLM_CONFIG_LIST.json"
+        groq_file = "./LLMCONFIG/GROQ_CONFIG_LIST.json"
+        bedrock_file = "./LLMCONFIG/BEDROCK_CONFIG_LIST.json"
+
+        # Load model configurations
+        anthropic_models = self.load_json(anthropic_file)
+        google_models = self.load_json(google_file)
+        mistral_models = self.load_json(mistral_file)
+        ollama_models = self.load_json(ollama_file)
+        groq_models = self.load_json(groq_file)
+        bedrock_models = self.load_json(bedrock_file)
+
+        # Combine all API types into a single dictionary
+        api_data = {
+            "anthropic": anthropic_models,
+            "bedrock": bedrock_models,
+            "google": google_models,
+            "groq": groq_models,
+            "mistral": mistral_models,
+            "ollama": ollama_models,
+            "openai": [],
+        }
+        user_inputs = {}
+
+        with st.sidebar:
+
+            # Streamlit UI
+            st.title("LLM Configuration ⚙️")
+
+            # Dropdown to select API type
+            user_inputs['api_type'] = api_type = st.selectbox("Select API Type", options=api_data.keys())
+
+            # Input handling for OpenAI
+            if api_type == "openai":
+                st.write("### Provide OpenAI Configuration Details")
+                selected_model_name = st.text_input("Enter Model Name", value="")
+                st.session_state['api_key'] = st.text_input("Enter API Key", value="", type="password")
+
+                # Display selected configuration
+                st.write("### Selected Configuration")
+                st.write(f"API Type: {api_type}")
+                st.write(f"Model: {selected_model_name}")
+                st.write("User Inputs:")
+                st.json(user_inputs)
+
+            else:
+                # Get models based on selected API type
+                selected_models = api_data.get(api_type, [])
+
+                # Dropdown to select model
+                model_names = [model["model"] for model in selected_models]
+                selected_model_name = st.selectbox("Select Model", options=model_names)
+
+                # Find the selected model configuration
+                selected_model_config = next(
+                    (model for model in selected_models if model["model"] == selected_model_name), {}
+                )
+                # Input fields for the selected model configuration
+                st.write("### Provide Configuration Details")
+
+                for key, value in selected_model_config.items():
+                    if key not in ["api_type", "model"]:
+                        # Use a password field for API keys, otherwise a text input
+                        input_label = f"Enter {key}"
+                        if "key" in key.lower():
+                            st.session_state['api_key'] = user_inputs[key] = st.text_input(input_label, value="", type="password")
+                        elif isinstance(value, bool):
+                            user_inputs[key] = st.checkbox(input_label, value=value)
+                        else:
+                            user_inputs[key] = st.text_input(input_label, value=str(value))
+
+        user_inputs['selected_model_name'] = selected_model_name
+
+        return user_inputs
+
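For the groq path, the dictionary returned by load_streamlit_ui has roughly this shape (illustrative values; note that st.text_input returns strings, so numeric config values come back as text):

user_inputs = {
    "api_type": "groq",
    "api_key": "<entered in the sidebar password field>",
    "selected_model_name": "llama3-8b-8192",
    # plus one entry per remaining key in the chosen config record,
    # e.g. "temperature": "0.5", "max_tokens": "2048" as strings
}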
usecases/basicexample.py
ADDED
@@ -0,0 +1,30 @@
+import asyncio
+from agents.assistantagent import TrackableAssistantAgent
+from agents.userproxyagent import TrackableUserProxyAgent
+import streamlit as st
+
+
+class BasicExample:
+    def __init__(self, assistant_name, user_proxy_name, llm_config, problem):
+        self.assistant = TrackableAssistantAgent(name=assistant_name,
+                                                 system_message="""You are a helpful assistant. Reply "TERMINATE" at
+                                                 the end when everything is done.""",
+                                                 human_input_mode="NEVER",
+                                                 llm_config=llm_config,
+                                                 )
+        self.user_proxy = TrackableUserProxyAgent(name=user_proxy_name,
+                                                  system_message="You are Admin",
+                                                  human_input_mode="NEVER",
+                                                  llm_config=llm_config,
+                                                  code_execution_config=False,
+                                                  is_termination_msg=lambda x: (x.get("content") or "").strip().endswith(
+                                                      "TERMINATE"))  # tolerate None content
+        self.problem = problem
+        self.loop = asyncio.new_event_loop()
+        asyncio.set_event_loop(self.loop)
+
+    async def initiate_chat(self):
+        await self.user_proxy.a_initiate_chat(self.assistant, max_turns=4, message=self.problem)
+
+    def run(self):
+        self.loop.run_until_complete(self.initiate_chat())
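A sketch of driving BasicExample directly with a hand-built Groq config, mirroring what app.py assembles from the UI. The trackable agents render through Streamlit, so this is still meant to run inside the app; the key placeholder is yours to fill:

from usecases.basicexample import BasicExample

llm_config = {
    "config_list": [{
        "api_type": "groq",
        "model": "llama3-8b-8192",       # illustrative model choice
        "api_key": "<your Groq API key>",
        "cache_seed": None,
    }],
    "timeout": 60,
}

BasicExample(
    assistant_name="Assistant",
    user_proxy_name="Userproxy",  # matches the sender-name check in the trackable agents
    llm_config=llm_config,
    problem="What is the capital of France?",
).run()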