# NOTE(review): lines above the first import were HuggingFace Spaces
# file-viewer chrome ("Spaces:", "Build error", file size, line-number
# gutter) captured by the scrape, not part of the module; removed.
import os, re, sys, yaml
from rich import print as rp
from dotenv import load_dotenv, find_dotenv
from improvement_prompts import first_prompt, second_prompt,new_first_prompt, new_second_prompt
load_dotenv(find_dotenv())
import logging
from hugchat import hugchat
from hugchat.login import Login
from typing import List, Dict, Generator, Optional,Tuple
logging.basicConfig(filename='improvement_log.log', level=logging.INFO)
from UberToolkit import UberToolkit as UberK
class HugChatLLM:
    """Wrapper around hugchat's ChatBot.

    Adds credential bootstrap (EMAIL/PASSWD env vars, loaded via dotenv at
    module import), conversation helpers, and trigger-keyword dispatch on
    model responses (see trigger_mon).
    """

    def __init__(self, cookie_path_dir: str = "./cookies/"):
        """Log in and open a fresh conversation.

        Exits the process (sys.exit(1)) when EMAIL/PASSWD are unset.
        """
        self.email = os.getenv("EMAIL")
        self.password = os.getenv("PASSWD")
        if not self.email or not self.password:
            print("EMAIL and PASSWD environment variables must be set.")
            sys.exit(1)
        self.cookie_path_dir = cookie_path_dir
        self.chatbot = self._login_and_create_chatbot()
        self.current_conversation = self.chatbot.new_conversation(
            modelIndex=1, system_prompt='', switch_to=True)
        self.img_gen_servers = ["Yntec/HuggingfaceDiffusion", "Yntec/WinningBlunder"]
        # self.igllm = ImageGeneratorLLM()
        # Trigger keyword -> action. Callable values are invoked with the
        # payload text by trigger_mon; string values are prompt prefixes
        # (currently only detected and logged, not auto-queried).
        self.triggers = {
            '/improve:':   'Improve the following text: ',
            '/fix:':       'Fix the following code: ',
            "/save_yaml:": self.save_yaml,
            "/add_tab:":   "",
            "/edit_tab:":  "",
        }

    def _login_and_create_chatbot(self) -> "hugchat.ChatBot":
        """Authenticate and return a ChatBot; cookies cached in cookie_path_dir.

        (Annotation quoted so the class body does not require hugchat at
        definition time.)
        """
        sign = Login(self.email, self.password)
        cookies = sign.login(cookie_dir_path=self.cookie_path_dir, save_cookies=True)
        return hugchat.ChatBot(cookies=cookies.get_dict())

    def __call__(self, text: str, stream: bool = True, web_search: bool = False) -> str:
        """Query the model, run trigger dispatch on the reply, return the text.

        FIX: the original non-stream branch unpacked trigger_mon()'s single
        string return into two names (ValueError at runtime), and the stream
        branch silently dropped the web_search flag. Both branches actually
        performed the same blocking query, so they are unified here; the
        `stream` parameter is kept for backward compatibility but has no
        effect (use stream_query for true streaming).
        """
        return self.trigger_mon(self.query(text, web_search=web_search))

    def query(self, text: str, web_search: bool = False) -> str:
        """Send text to the current conversation; block until the reply is done.

        FIX: previously returned None for empty input despite the -> str
        annotation; now returns '' without hitting the API.
        """
        if not text:
            return ''
        message_result = self.chatbot.chat(text, web_search=web_search)
        return message_result.wait_until_done()

    def stream_query(self, text: str) -> Generator[str, None, None]:
        """Yield response chunks as the model streams them; empty input yields nothing."""
        if text:
            yield from self.chatbot.chat(text, stream=True)

    def new_conversation(self, model_index=1, system_prompt='', switch_to: bool = True) -> str:
        """Create (and optionally switch to) a new conversation; returns its handle."""
        return self.chatbot.new_conversation(
            modelIndex=model_index, system_prompt=system_prompt, switch_to=switch_to)

    def get_remote_conversations(self, replace_conversation_list: bool = True) -> List[Dict]:
        """Fetch conversations from the server, optionally replacing the local list."""
        return self.chatbot.get_remote_conversations(
            replace_conversation_list=replace_conversation_list)

    def get_conversation_list(self) -> List[Dict]:
        """Return the locally cached conversation list."""
        return self.chatbot.get_conversation_list()

    def switch_model(self, index: int) -> None:
        """Switch the backing LLM by index (indices from get_available_models)."""
        self.chatbot.switch_llm(index)

    def get_conversation_info(self) -> Dict:
        """Return a plain-dict snapshot of the current conversation."""
        info = self.chatbot.get_conversation_info()
        return {
            "id": info.id,
            "title": info.title,
            "model": info.model,
            "system_prompt": info.system_prompt,
            "history": info.history,
        }

    def search_assistant_by_name(self, assistant_name: str) -> Dict:
        """Look up an assistant by display name."""
        return self.chatbot.search_assistant(assistant_name=assistant_name)

    def search_assistant_by_id(self, assistant_id: str) -> Dict:
        """Look up an assistant by its id."""
        return self.chatbot.search_assistant(assistant_id=assistant_id)

    def get_assistant_list(self, page: int = 0) -> List[Dict]:
        """Return one page of the public assistant directory."""
        return self.chatbot.get_assistant_list_by_page(page=page)

    def new_conversation_with_assistant(self, assistant: Dict, switch_to: bool = True) -> Dict:
        """Start a conversation bound to an assistant and return its info.

        FIX: was annotated -> None although it returns the conversation info.
        """
        self.chatbot.new_conversation(assistant=assistant, switch_to=switch_to)
        return self.chatbot.get_conversation_info()

    def delete_all_conversations(self) -> None:
        """Delete every conversation on the account."""
        self.chatbot.delete_all_conversations()

    def get_available_models(self) -> List[Dict[str, str]]:
        """Return [{'id': ..., 'name': ...}] for every available model.

        FIX: the class previously defined get_available_models twice; the
        earlier definition was shadowed by this one and has been removed
        (no behavior change).
        """
        models = self.chatbot.get_available_llm_models()
        for model in models:
            logging.info(model.id)
            logging.info(model.name)
        return [{"id": model.id, "name": model.name} for model in models]

    def save_yaml(self, input_string: str, file_path: str = '.',
                  file_name: str = 'trigger_output.yaml') -> str:
        """Dump input_string as YAML to file_path/file_name.

        FIX: `rp` is rich's print *function* (`from rich import print as rp`),
        so the original `rp.print(...)` raised AttributeError; call rp(...)
        directly. file_path/file_name gained defaults so the '/save_yaml:'
        trigger — which supplies only the payload — can invoke this method
        (backward compatible: explicit arguments still work).
        """
        yaml_file_path = os.path.join(file_path, file_name)
        rp(f'Saving YAML data: {input_string}\n\nTo: {yaml_file_path}')
        # here we write the YAML data to a file
        with open(yaml_file_path, 'w') as file:
            yaml.dump(input_string, file)
        return "YAML data saved successfully."

    def trigger_mon(self, input_string: str) -> str:
        """Detect the first trigger keyword in an AI response and dispatch it.

        The payload is the text between the trigger and the end of that line.
        Callable actions are invoked with the payload; string actions (prompt
        prefixes) are only detected and logged. The consumed
        '<trigger><payload>' span is replaced with '[trigger_processed]' in
        the returned string; with no trigger present the input is returned
        unchanged.

        FIX: the original called str methods on a list
        (`input_string.split(...).replace(...)` -> AttributeError) and
        invoked string-valued actions as functions (TypeError).
        """
        for trigger, action in self.triggers.items():
            if trigger not in input_string:
                continue
            # payload = text between the trigger and the end of its line
            payload = input_string.split(trigger, 1)[1].split('\n', 1)[0]
            print(f'Detected trigger: {trigger}, fetching input: {payload}')
            if callable(action):
                action(payload)
            # mark the consumed span so downstream consumers see it was handled
            return input_string.replace(f'{trigger}{payload}', '[trigger_processed]', 1)
        # no trigger found: pass the response through untouched
        return input_string
class ImageGeneratorLLM(HugChatLLM):
    """Thin HugChatLLM subclass specialised for image generation."""

    def __init__(self):
        # Reuse the parent's login / conversation bootstrap unchanged.
        super().__init__()

    def __call__(self, prompt: str) -> str:
        """Calling the instance is shorthand for generate_image()."""
        return self.generate_image(prompt)

    def generate_image(self, prompt: str) -> str:
        """Delegate image generation to the underlying chatbot client.

        NOTE(review): assumes hugchat's ChatBot exposes generate_image —
        confirm against the installed hugchat version.
        """
        return self.chatbot.generate_image(prompt)