Commit: 9f396ec
Parent: f152fb5

text generation - hf - 2 model
- .github/workflows/main.yml +24 -0
- .gitignore +3 -0
- .vscode/launch.json +16 -0
- README.md +13 -0
- app.py +55 -0
- configfile.ini +7 -0
- configfile.py +23 -0
- requirements.txt +3 -0
- src/agents/assistantagent.py +14 -0
- src/agents/retrieveassistantagent.py +12 -0
- src/agents/userproxyagent.py +12 -0
- src/hf_autogen/hfautogen.py +240 -0
- src/streamlitui/loadui.py +35 -0
- src/usecases/imggene.py +0 -0
- src/usecases/textgen.py +39 -0
.github/workflows/main.yml
ADDED
@@ -0,0 +1,24 @@
+name: Sync to Hugging Face hub
+on:
+  push:
+    branches: [main]
+
+  # to run this workflow manually from the Actions tab
+  workflow_dispatch:
+
+jobs:
+  sync-to-hub:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+          lfs: false
+
+      - name: Ignore large files
+        run: git filter-branch --index-filter 'git rm -rf --cached --ignore-unmatch "None/fast-bge-small-en.tar.gz"' HEAD
+
+      - name: Push to hub
+        env:
+          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+        run: git push --force https://genaitiwari:$HF_TOKEN@huggingface.co/spaces/genaitiwari/AutogenWithHF main
.gitignore
ADDED
@@ -0,0 +1,3 @@
+
+*.pyc
+/autogen_cache/42
.vscode/launch.json
ADDED
@@ -0,0 +1,16 @@
+{
+    "version": "0.2.0",
+    "configurations": [
+        {
+            "name": "debug streamlit",
+            "type": "debugpy",
+            "request": "launch",
+            "program": "./.venv/Lib/site-packages/streamlit", // /home/xx/tmp/venv/bin/streamlit",
+            "args": [
+                "run",
+                "app.py"
+            ],
+            "justMyCode": false
+        }
+    ]
+}
README.md
CHANGED
@@ -1,2 +1,15 @@
+---
+title: AutogenWithHF
+emoji: π
+colorFrom: blue
+colorTo: yellow
+sdk: streamlit
+sdk_version: 1.41.1
+app_file: app.py
+pinned: false
+license: apache-2.0
+short_description: autogen with custom model - hf for text, image etc. use cases
+---
+
 # AutogenWithHF
 
app.py
ADDED
@@ -0,0 +1,55 @@
+import streamlit as st
+from configfile import Config
+from src.hf_autogen.hfautogen import hf_llmconfig
+from src.streamlitui.loadui import LoadStreamlitUI
+from src.usecases.textgen import TexGeneration
+
+
+# MAIN Function START
+
+if __name__ == "__main__":
+    # config
+    obj_config = Config()
+    # load ui
+    ui = LoadStreamlitUI()
+    user_input = ui.load_streamlit_ui()
+
+    # # Configure LLM
+    # obj_llm_config = GroqLLM(user_controls_input=user_input)
+    # obj_llm_config.groq_llm_config()
+    # llm_config = st.session_state['llm_config']
+
+    # user input
+    problem = st.chat_input("Start Chat")
+
+    # configure llm
+    hf_llmconfig(selected_model=user_input["selected_hf_model"])
+    if 'config_list' in st.session_state['llm_config']:
+        llm_config = st.session_state['llm_config']
+
+    if user_input['selected_usecase'] == "Text Generation":
+        st.subheader("Text generation")
+        if problem:
+            with st.chat_message("user"):
+                st.write(problem)
+
+            obj_txtgen = TexGeneration(assistant_name="Assistant", user_proxy_name='Userproxy',
+                                       llm_config=llm_config,
+                                       problem=problem)
+            obj_txtgen.run()
+
+    elif user_input['selected_usecase'] == "Image Generation":
+        st.subheader("Image generation")
+        if problem:
+            with st.chat_message("user"):
+                st.write(problem)
configfile.ini
ADDED
@@ -0,0 +1,7 @@
+[DEFAULT]
+PAGE_TITLE = AUTOGEN WITH HF
+LLM_OPTIONS = Huggingface
+USECASE_OPTIONS = Text Generation, Image Generation
+TEXT_HF_MODEL_OPTIONS = mistralai/Mixtral-8x7B-Instruct-v0.1, meta-llama/Llama-3.1-8B
+IMG_HF_MODEL_OPTIONS = stabilityai/stable-diffusion-3.5-large
+
configfile.py
ADDED
@@ -0,0 +1,23 @@
+from configparser import ConfigParser
+
+
+class Config:
+    def __init__(self, config_file="configfile.ini"):
+        self.config = ConfigParser()
+        self.config.read(config_file)
+
+    def get_llm_options(self):
+        return self.config["DEFAULT"].get("LLM_OPTIONS").split(", ")
+
+    def get_usecase_options(self):
+        return self.config["DEFAULT"].get("USECASE_OPTIONS").split(", ")
+
+    def get_text_hf_model_options(self):
+        return self.config["DEFAULT"].get("TEXT_HF_MODEL_OPTIONS").split(", ")
+
+    def get_img_hf_model_options(self):
+        return self.config["DEFAULT"].get("IMG_HF_MODEL_OPTIONS").split(", ")
+
+    def get_page_title(self):
+        return self.config["DEFAULT"].get("PAGE_TITLE")
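
Note: a minimal usage sketch (not part of the commit) showing how Config resolves values from configfile.ini above; it assumes the ini file sits in the working directory:

    from configfile import Config

    config = Config()
    print(config.get_page_title())          # AUTOGEN WITH HF
    print(config.get_usecase_options())     # ['Text Generation', 'Image Generation']
    print(config.get_text_hf_model_options()[0])  # mistralai/Mixtral-8x7B-Instruct-v0.1
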
requirements.txt
ADDED
@@ -0,0 +1,3 @@
+streamlit
+huggingface-hub
+pyautogen==0.2.10
src/agents/assistantagent.py
ADDED
@@ -0,0 +1,14 @@
+from autogen import AssistantAgent
+import streamlit as st
+
+
+class TrackableAssistantAgent(AssistantAgent):
+    def _process_received_message(self, message, sender, silent):
+        # echo messages coming from the user proxy into the Streamlit chat
+        if message and isinstance(message, str) and sender.name == "Userproxy":
+            with st.chat_message("user"):
+                st.write(message)
+
+        return super()._process_received_message(message, sender, silent)
src/agents/retrieveassistantagent.py
ADDED
@@ -0,0 +1,12 @@
+from autogen import AssistantAgent
+import streamlit as st
+from autogen.agentchat.contrib.retrieve_assistant_agent import RetrieveAssistantAgent
+
+
+class TrackableRetrieveAssistantAgent(RetrieveAssistantAgent):
+    def _process_received_message(self, message, sender, silent):
+        if isinstance(message, str) and sender.name == "Userproxy":
+            with st.chat_message("user"):
+                st.write(message)
+        return super()._process_received_message(message, sender, silent)
src/agents/userproxyagent.py
ADDED
@@ -0,0 +1,12 @@
+from autogen import UserProxyAgent
+import streamlit as st
+
+
+class TrackableUserProxyAgent(UserProxyAgent):
+    def _process_received_message(self, message, sender, silent):
+        # render each received message in the Streamlit chat under the sender's name
+        with st.chat_message(sender.name.lower()):
+            if isinstance(message, str):
+                st.write(message)
+            else:
+                st.write(message['content'])
+        return super()._process_received_message(message, sender, silent)
src/hf_autogen/hfautogen.py
ADDED
@@ -0,0 +1,240 @@
+import asyncio
+import autogen
+from autogen import AssistantAgent, UserProxyAgent, GroupChatManager, GroupChat, ConversableAgent
+from types import SimpleNamespace
+import requests
+import json
+import os
+import shutil
+import random
+import streamlit as st
+
+from src.agents.assistantagent import TrackableAssistantAgent
+from src.agents.userproxyagent import TrackableUserProxyAgent
+
+
+class APIModelClient:
+    def __init__(self, config, **kwargs):
+        self.device = config.get("device", "cpu")
+        self.api_url = "https://api-inference.huggingface.co/models/mistralai/Mixtral-8x7B-Instruct-v0.1"
+        # self.api_url = "https://api-inference.huggingface.co/models/google/gemma-7b-it"
+        # read the API token from the environment; never hardcode credentials
+        self.headers = {"Authorization": f"Bearer {os.environ.get('HF_TOKEN', '')}"}
+
+        self.model_name = config.get("model")
+        self.chat_index = 0
+
+        self.conversion_mem = ""
+
+        # self.tokenizer and self.model lines are removed or modified
+
+    def create(self, params):
+        # flatten the chat history into a single prompt string
+        conversation_history = ""
+
+        for message in params["messages"]:
+            if message["role"] == "system":
+                prefix = 'Bot Description:\n'
+            elif message["role"] == "user":
+                prefix = 'User____:\n'
+            else:
+                prefix = f'Agent ({message["role"]}):\n'
+            conversation_history += prefix + f'{message["content"]}\n\n'
+
+        # try:
+        #     _input = f'Given the context of the last message: {params["messages"][-2]["content"]}\n\n\nHere is input on the context: {params["messages"][-1]["content"]}'
+        # except Exception as e:
+        #     print(e)
+        #     _input = params["messages"][-1]["content"]
+
+        input_data = {
+            "inputs": conversation_history,
+            "parameters": {"max_new_tokens": 1000, "return_full_text": False, "do_sample": False},
+            "options": {"wait_for_model": True, "use_cache": False}
+            # include any other parameters required by your API
+        }
+
+        # send the request to the model's API
+        response = requests.post(self.api_url, json=input_data, headers=self.headers)
+
+        if response.status_code == 200:
+            api_response = response.json()
+            # assuming the API returns [{"generated_text": ...}]
+            if "\n ```" in api_response[0]["generated_text"]:
+                api_response[0]["generated_text"] = api_response[0]["generated_text"].replace("\n ", "\n")
+            model_response = SimpleNamespace()
+            model_response.choices = []
+
+            choice = SimpleNamespace()
+            # choice.message = SimpleNamespace(content=api_response[0]["generated_text"].split("```")[1])
+            choice.message = SimpleNamespace(content=api_response[0]["generated_text"])
+            model_response.choices.append(choice)
+
+            model_response.model = self.model_name
+
+            self.chat_index += 1
+
+            return model_response
+        else:
+            raise Exception(f"API request failed with status code {response.status_code}: {response.text}")
+
+    def message_retrieval(self, response):
+        """Retrieve the messages from the response."""
+        choices = response.choices
+        return [choice.message.content for choice in choices]
+
+    def cost(self, response) -> float:
+        """Calculate the cost of the response."""
+        response.cost = 0
+        return 0
+
+    @staticmethod
+    def get_usage(response):
+        # returns a dict of prompt_tokens, completion_tokens, total_tokens, cost, model
+        # if usage needs to be tracked, else None
+        return {}
+
+
+class APIModelClientWithArguments(APIModelClient):
+    def __init__(self, config, hf_key, hf_url="https://api-inference.huggingface.co/models/mistralai/Mixtral-8x7B-Instruct-v0.1", **kwargs):
+        self.device = config.get("device", "cpu")
+        self.api_url = hf_url
+        # self.api_url = "https://api-inference.huggingface.co/models/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"
+
+        self.headers = {"Authorization": f"Bearer {hf_key}"}
+
+        self.model_name = config.get("model")
+        self.chat_index = 0
+
+        self.conversion_mem = ""
+
+
+def hf_llmconfig(selected_model):
+    llm_config = {
+        "config_list": [{
+            "model": selected_model,
+            "model_client_cls": "APIModelClientWithArguments",
+            "device": ""
+        }]
+    }
+    st.session_state['llm_config'] = llm_config
+    return llm_config
+
+
+def UserAgent(name, hf_key, max_consecutive_auto_reply=2, code_dir="coding", use_docker=False, system_message="You are a helpful AI assistant"):
+    llm_config = {
+        "config_list": [{
+            "model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
+            "model_client_cls": "APIModelClientWithArguments",
+            "device": ""
+        }]
+    }
+    user_agent = TrackableUserProxyAgent(
+        name=name,  # use the caller-supplied name instead of silently ignoring it
+        max_consecutive_auto_reply=max_consecutive_auto_reply,
+        llm_config=llm_config,
+        is_termination_msg=lambda x: x.get("content", "").rstrip().endswith("TERMINATE"),
+        code_execution_config={
+            "work_dir": code_dir,
+            "use_docker": use_docker,
+        },
+        system_message=system_message,
+        human_input_mode="NEVER"
+    )
+
+    user_agent.register_model_client(model_client_cls=APIModelClientWithArguments, hf_key=hf_key)
+
+    return user_agent
+
+
+def ModelAgent(name, hf_key, hf_url="https://api-inference.huggingface.co/models/mistralai/Mixtral-8x7B-Instruct-v0.1", system_message="", code_execution=False):
+    default_system_message = """You are a helpful AI assistant.
+Solve tasks using your coding and language skills.
+In the following cases, suggest python code (in a python coding block) or shell script (in an sh coding block) for the user to execute. Make sure to prefix the code block with 'python' or 'sh' depending on the language.
+1. When you need to collect info, use the code to output the info you need, for example, browse or search the web, download/read a file, print the content of a webpage or a file, get the current date/time, or check the operating system. After sufficient info is printed and the task is ready to be solved based on your language skill, you can solve the task by yourself.
+2. When you need to perform some task with code, use the code to perform the task and output the result. Finish the task smartly.
+Solve the task step by step if you need to. If a plan is not provided, explain your plan first. Be clear which step uses code, and which step uses your language skill.
+When using code, you must indicate the script type in the code block. The user cannot provide any other feedback or perform any other action beyond executing the code you suggest. The user can't modify your code. So do not suggest incomplete code which requires users to modify. Don't use a code block if it's not intended to be executed by the user.
+If you want the user to save the code in a file before executing it, put # filename: <filename> inside the code block as the first line. Don't include multiple code blocks in one response. Do not ask users to copy and paste the result. Instead, use the 'print' function for the output when relevant. Check the execution result returned by the user.
+If the result indicates there is an error, fix the error and output the code again. Suggest the full code instead of partial code or code changes. If the error can't be fixed or if the task is not solved even after the code is executed successfully, analyze the problem, revisit your assumptions, collect additional info you need, and think of a different approach to try.
+When you find an answer, verify the answer carefully. Include verifiable evidence in your response if possible.
+"""
+
+    if system_message == "":
+        system_message = default_system_message
+
+    llm_config = {
+        "config_list": [{
+            "model": "",
+            "model_client_cls": "APIModelClientWithArguments",
+            "device": ""
+        }]
+    }
+
+    agent = TrackableAssistantAgent(
+        name=name,
+        llm_config=llm_config,
+        system_message=system_message,
+        code_execution_config=code_execution,
+    )
+    agent.register_model_client(model_client_cls=APIModelClientWithArguments, hf_key=hf_key, hf_url=hf_url)
+
+    return agent
+
+
+async def InitChat(user, agent, _input, summary_method="reflection_with_llm"):
+    def clear_directory_contents(dir_path):
+        try:
+            for item in os.listdir(dir_path):
+                item_path = os.path.join(dir_path, item)
+                if os.path.isfile(item_path) or os.path.islink(item_path):
+                    os.remove(item_path)  # remove files and links
+                elif os.path.isdir(item_path):
+                    shutil.rmtree(item_path)  # remove directories
+            shutil.rmtree(dir_path)
+            print(f"All contents of '{dir_path}' have been removed.")
+        except FileNotFoundError:
+            pass
+
+    # seed = random.randint(0, 99999)
+    seed = 42
+    # clear_directory_contents(f'./autogen_cache/{seed}')
+
+    custom_cache = autogen.Cache({"cache_seed": seed, "cache_path_root": "autogen_cache"})
+
+    await user.a_initiate_chat(
+        agent,
+        max_turns=2,
+        message=_input,
+        summary_method=summary_method,
+        cache=custom_cache,
+    )
+
+    # clear_directory_contents(f'./autogen_cache/{seed}')
+
+
+def GroupChat(user, agents, _input, hf_key, hf_url="https://api-inference.huggingface.co/models/mistralai/Mixtral-8x7B-Instruct-v0.1", max_round=5):
+    # note: this helper shadows the GroupChat class imported from autogen above;
+    # the class is referenced via autogen.GroupChat below, so this still works
+    llm_config = {
+        "config_list": [{
+            "model": "",
+            "model_client_cls": "APIModelClientWithArguments",
+            "device": ""
+        }]
+    }
+
+    groupchat = autogen.GroupChat(agents=agents, messages=[], max_round=max_round, speaker_selection_method="round_robin", allow_repeat_speaker=False)
+    manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)
+
+    manager.register_model_client(model_client_cls=APIModelClientWithArguments, hf_key=hf_key, hf_url=hf_url)
+    # InitChat is a coroutine, so it must be driven by an event loop rather than called directly
+    loop = asyncio.new_event_loop()
+    asyncio.set_event_loop(loop)
+    loop.run_until_complete(InitChat(user, manager, _input))
+
+# Example prompt: Write me a script to save the BTC chart from the past year to an image.
+
+# if __name__ == "__main__":
+#     print("Running as main")
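
Note: pyautogen 0.2.x binds custom model clients in two steps: the config_list entry names the class via "model_client_cls", and the concrete class (plus any extra constructor kwargs, here hf_key/hf_url) is attached with register_model_client before the agent can generate a reply. A minimal sketch (not part of the commit); the hf_xxx token is a placeholder:

    from autogen import AssistantAgent
    from src.hf_autogen.hfautogen import APIModelClientWithArguments

    llm_config = {"config_list": [{"model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
                                   "model_client_cls": "APIModelClientWithArguments",
                                   "device": ""}]}
    agent = AssistantAgent(name="assistant", llm_config=llm_config)
    # until this call runs, reply generation fails because the named
    # client class has not yet been bound to the agent
    agent.register_model_client(model_client_cls=APIModelClientWithArguments,
                                hf_key="hf_xxx")  # placeholder token
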
src/streamlitui/loadui.py
ADDED
@@ -0,0 +1,35 @@
+import streamlit as st
+from configfile import Config  # import the Config class
+
+
+class LoadStreamlitUI:
+    def __init__(self):
+        self.config = Config()  # create a Config instance
+        self.user_controls = {}
+
+    def load_streamlit_ui(self):
+        st.set_page_config(page_title="🤖 " + self.config.get_page_title(), layout="wide")
+        st.header("🤖 " + self.config.get_page_title())
+
+        with st.sidebar:
+
+            # use case selection
+            usecase_options = self.config.get_usecase_options()
+            self.user_controls["selected_usecase"] = st.selectbox("Select Usecase", usecase_options)
+            # LLM selection from config
+            llm_options = self.config.get_llm_options()
+            self.user_controls["selected_llm"] = st.selectbox("Select LLM", llm_options)
+
+            # model selection
+            if self.user_controls["selected_usecase"] == "Text Generation":
+                model_options = self.config.get_text_hf_model_options()
+                self.user_controls["selected_hf_model"] = st.selectbox("Select Model", model_options)
+            elif self.user_controls["selected_usecase"] == "Image Generation":
+                model_options = self.config.get_img_hf_model_options()
+                self.user_controls["selected_hf_model"] = st.selectbox("Select Model", model_options)
+
+            # API key input
+            st.session_state['api_key'] = st.text_input("API Key", type="password")
+
+        return self.user_controls
src/usecases/imggene.py
ADDED
File without changes
src/usecases/textgen.py
ADDED
@@ -0,0 +1,39 @@
+import asyncio
+from src.hf_autogen.hfautogen import APIModelClientWithArguments, ModelAgent, UserAgent, InitChat
+from src.agents.assistantagent import TrackableAssistantAgent
+from src.agents.userproxyagent import TrackableUserProxyAgent
+import streamlit as st
+
+
+class TexGeneration:
+    def __init__(self, assistant_name, user_proxy_name, llm_config, problem):
+        self.assistant = TrackableAssistantAgent(name=assistant_name,
+                                                 system_message="""You are a helpful assistant. Reply "TERMINATE"
+                                                 at the end when everything is done.""",
+                                                 human_input_mode="NEVER",
+                                                 llm_config=llm_config,
+                                                 )
+
+        self.user_proxy = TrackableUserProxyAgent(name=user_proxy_name,
+                                                  system_message="You are Admin",
+                                                  human_input_mode="NEVER",
+                                                  llm_config=llm_config,
+                                                  code_execution_config=False,
+                                                  is_termination_msg=lambda x: x.get("content", "").strip().endswith(
+                                                      "TERMINATE"))
+
+        # note: the two agents above are superseded; self.assistant is reassigned
+        # below and self.user_proxy is never used by run()
+        self.user = UserAgent("user_proxy", hf_key=st.session_state['api_key'])
+        self.assistant = ModelAgent("assistant",
+                                    hf_key=st.session_state['api_key'],
+                                    system_message="You are a friendly AI assistant.")
+
+        self.problem = problem
+        self.loop = asyncio.new_event_loop()
+        asyncio.set_event_loop(self.loop)
+
+    # async def initiate_chat(self):
+    #     await InitChat(self.user, self.assistant, self.problem)
+
+    def run(self):
+        self.loop.run_until_complete(InitChat(self.user, self.assistant, self.problem))
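
Note: TexGeneration owns its own event loop, so run() blocks the calling Streamlit thread until the chat completes. A minimal driving sketch (not part of the commit), assuming a Streamlit session and a placeholder hf_xxx token stored the way loadui.py stores it:

    import streamlit as st
    from src.hf_autogen.hfautogen import hf_llmconfig
    from src.usecases.textgen import TexGeneration

    st.session_state['api_key'] = "hf_xxx"  # placeholder; normally typed into the sidebar
    llm_config = hf_llmconfig("mistralai/Mixtral-8x7B-Instruct-v0.1")
    job = TexGeneration(assistant_name="Assistant", user_proxy_name="Userproxy",
                        llm_config=llm_config, problem="Summarize what AutoGen does.")
    job.run()  # drives InitChat, which caps the exchange at max_turns=2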