Spaces:
Build error
Build error
Create purellm.py
Browse files- purellm.py +142 -0
purellm.py
ADDED
|
@@ -0,0 +1,142 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os,re,sys,yaml
|
| 2 |
+
from rich import print as rp
|
| 3 |
+
from dotenv import load_dotenv, find_dotenv
|
| 4 |
+
from improvement_prompts import first_prompt, second_prompt,new_first_prompt, new_second_prompt
|
| 5 |
+
load_dotenv(find_dotenv())
|
| 6 |
+
import logging
|
| 7 |
+
from hugchat import hugchat
|
| 8 |
+
from hugchat.login import Login
|
| 9 |
+
from typing import List, Dict, Generator, Optional,Tuple
|
| 10 |
+
logging.basicConfig(filename='improvement_log.log', level=logging.INFO)
|
| 11 |
+
|
| 12 |
+
class HugChatLLM:
    """Thin convenience wrapper around ``hugchat.ChatBot``.

    Logs into HuggingChat with the EMAIL/PASSWD environment variables,
    opens a default conversation, and exposes blocking/streaming query
    helpers plus a small trigger-dispatch mechanism (``trigger_mon``)
    that scans model replies for command prefixes such as ``/save_yaml:``.
    """

    def __init__(self, cookie_path_dir: str = "./cookies/"):
        """Log in and open a default conversation.

        Exits the process when EMAIL/PASSWD are not set.
        """
        self.email = os.getenv("EMAIL")
        self.password = os.getenv("PASSWD")

        if not self.email or not self.password:
            print("EMAIL and PASSWD environment variables must be set.")
            sys.exit(1)
        self.cookie_path_dir = cookie_path_dir
        self.chatbot = self._login_and_create_chatbot()
        # modelIndex=1 selects the second model in HuggingChat's list -- TODO confirm intended model
        self.current_conversation = self.chatbot.new_conversation(modelIndex=1, system_prompt='', switch_to=True)
        self.img_gen_servers = ["Yntec/HuggingfaceDiffusion", "Yntec/WinningBlunder"]
        #self.igllm=ImageGeneratorLLM()

        # Maps a trigger prefix found in a model reply to either a prompt
        # string (informational only) or a callable invoked with the text
        # that follows the trigger on the same line.
        self.triggers = {
            '/improve:': 'Improve the following text: ',
            '/fix:': 'Fix the following code: ',
            "/save_yaml:": self.save_yaml,
            "/add_tab:": "",
            "/edit_tab:": "",
        }

    def _login_and_create_chatbot(self) -> 'hugchat.ChatBot':
        """Authenticate against HuggingChat and return a ready ChatBot.

        Annotation is quoted so defining the class never eagerly evaluates
        the third-party ``hugchat`` name.
        """
        sign = Login(self.email, self.password)
        cookies = sign.login(cookie_dir_path=self.cookie_path_dir, save_cookies=True)
        return hugchat.ChatBot(cookies=cookies.get_dict())

    def __call__(self, text: str, stream: bool = True, web_search: bool = False):
        """Send *text* to the model and post-process the reply via trigger_mon.

        Returns the processed reply string when ``stream`` is True, otherwise
        a ``(processed_reply, None)`` tuple (second slot reserved for code).
        """
        if stream:
            # NOTE(review): despite the name this path is not incremental --
            # it waits for the full reply (see stream_query for streaming).
            return self.trigger_mon(input_string=self.query(text))
        # BUGFIX: trigger_mon returns a single string; the original unpacked
        # it into two names, which raised ValueError at runtime.
        processed = self.trigger_mon(self.query(text, web_search))
        return processed, None

    def query(self, text: str, web_search: bool = False) -> str:
        """Send *text* and block until the full reply is available.

        BUGFIX: returns '' for empty input -- the original implicitly
        returned None, which crashed downstream trigger scanning.
        """
        if text == '':
            return ''
        message_result = self.chatbot.chat(text, web_search=web_search)
        return message_result.wait_until_done()

    def stream_query(self, text: str) -> Generator[str, None, None]:
        """Yield reply chunks incrementally; yields nothing for empty input."""
        if not text == '':
            for resp in self.chatbot.chat(text, stream=True):
                yield resp

    def new_conversation(self, model_index=1, system_prompt='', switch_to: bool = True) -> str:
        """Open a fresh conversation and (optionally) switch to it."""
        return self.chatbot.new_conversation(modelIndex=model_index, system_prompt=system_prompt, switch_to=switch_to)
        #return self.chatbot.get_conversation_info()

    def get_remote_conversations(self, replace_conversation_list: bool = True) -> List[Dict]:
        """Fetch the server-side conversation list."""
        return self.chatbot.get_remote_conversations(replace_conversation_list=replace_conversation_list)

    def get_conversation_list(self) -> List[Dict]:
        """Return the locally cached conversation list."""
        return self.chatbot.get_conversation_list()

    def switch_model(self, index: int) -> None:
        """Switch the active LLM by its index in the available-models list."""
        self.chatbot.switch_llm(index)

    def get_conversation_info(self) -> Dict:
        """Return the active conversation's metadata as a plain dict."""
        info = self.chatbot.get_conversation_info()
        return {
            "id": info.id,
            "title": info.title,
            "model": info.model,
            "system_prompt": info.system_prompt,
            "history": info.history
        }

    def search_assistant_by_name(self, assistant_name: str) -> Dict:
        """Look up a HuggingChat assistant by display name."""
        return self.chatbot.search_assistant(assistant_name=assistant_name)

    def search_assistant_by_id(self, assistant_id: str) -> Dict:
        """Look up a HuggingChat assistant by its id."""
        return self.chatbot.search_assistant(assistant_id=assistant_id)

    def get_assistant_list(self, page: int = 0) -> List[Dict]:
        """Return one page of the public assistant directory."""
        return self.chatbot.get_assistant_list_by_page(page=page)

    def new_conversation_with_assistant(self, assistant: Dict, switch_to: bool = True) -> Dict:
        """Start a conversation bound to *assistant* and return its info.

        BUGFIX: annotation corrected from ``-> None`` -- the method has
        always returned the conversation info.
        """
        self.chatbot.new_conversation(assistant=assistant, switch_to=switch_to)
        return self.chatbot.get_conversation_info()

    def delete_all_conversations(self) -> None:
        """Delete every conversation on the account."""
        self.chatbot.delete_all_conversations()

    # NOTE: the original file defined get_available_models twice; the first
    # (returning bare names) was dead code shadowed by this one, so only the
    # effective definition is kept.
    def get_available_models(self) -> List[Dict[str, str]]:
        """Return available models as ``{"id", "name"}`` dicts, logging each."""
        models = self.chatbot.get_available_llm_models()
        for model in models:
            logging.info(model.id)
            logging.info(model.name)

        return [{"id": model.id, "name": model.name} for model in models]

    def save_yaml(self, input_string: str, file_path: str = '.', file_name: str = 'output.yaml') -> str:
        """Dump *input_string* as YAML into ``file_path/file_name``.

        Defaults added (backward-compatible) so the single-argument call made
        by the trigger dispatcher no longer raises TypeError.
        """
        yaml_file_path = os.path.join(file_path, file_name)
        # BUGFIX: ``rp`` *is* the rich ``print`` function (imported as an
        # alias); the original ``rp.print(...)`` raised AttributeError.
        rp(f'Saving YAML data: {input_string}\n\nTo: {yaml_file_path}')
        # here we write the YAML data to a file
        with open(yaml_file_path, 'w') as file:
            yaml.dump(input_string, file)
        return "YAML data saved successfully."

    def trigger_mon(self, input_string) -> str:
        """Scan a model reply for trigger prefixes and dispatch the first hit.

        The text between the trigger and the next newline is its payload;
        callable actions receive the payload, string actions (prompt
        templates) are only logged. The trigger+payload span is replaced by
        ``[trigger_processed]`` in the returned string. Replies with no
        trigger are returned untouched.
        """
        for trigger, action in self.triggers.items():
            if trigger not in input_string:
                continue
            # Payload = text after the (last) trigger occurrence, up to EOL.
            payload = input_string.split(trigger).pop().split('\n').pop(0)
            # BUGFIX: the original called .replace() on the *list* returned
            # by str.split(), which raised AttributeError; rebuild the
            # remainder with str.replace instead.
            remainder = input_string.replace(f'{trigger}{payload}', '[trigger_processed]', 1)
            print(f'Detected trigger: {trigger}, fetching input: {payload}')
            if callable(action):
                try:
                    action(payload)
                except TypeError:
                    # Action wanted more arguments than the payload alone.
                    logging.exception('Trigger %s action rejected payload', trigger)
            else:
                # BUGFIX: string actions were previously *called*, raising
                # TypeError; they are prompt templates, so just log them.
                logging.info('Trigger %s maps to prompt %r', trigger, action)
            return remainder
        # if no trigger is found, we return the input string as is
        return input_string
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
class ImageGeneratorLLM(HugChatLLM):
    """A HugChatLLM specialization whose call interface produces images."""

    def __init__(self):
        # Reuse the parent's login / conversation bootstrap unchanged.
        super().__init__()

    def __call__(self, prompt: str) -> str:
        """Shorthand: calling the instance delegates to generate_image."""
        return self.generate_image(prompt)

    def generate_image(self, prompt: str) -> str:
        """Ask the underlying chatbot to render *prompt* as an image."""
        return self.chatbot.generate_image(prompt)
|