Update main.py
Browse files
main.py
CHANGED
|
@@ -88,23 +88,46 @@ USER QUERY:{user_input}
|
|
| 88 |
TOPICS TO BE EXCLUDED:{excluded_topics}
|
| 89 |
YOUR OUTPUT SHOULD CONSIST ONLY A VALID PYTHON LIST, DO NOT ADD ADDITIONAL COMMENTS
|
| 90 |
"""
|
|
|
|
| 91 |
|
| 92 |
-
|
| 93 |
-
client = OpenAI(
|
| 94 |
api_key=TOGETHER_API_KEY,
|
| 95 |
base_url="https://together.hconeai.com/v1",
|
| 96 |
default_headers={ "Helicone-Auth": f"Bearer {HELICON_API_KEY}"})
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 97 |
|
| 98 |
messages=[{"role": "system", "content": SysPrompt},{"role": "user", "content": message}]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 99 |
|
| 100 |
-
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
|
| 104 |
-
frequency_penalty=0.2
|
| 105 |
-
)
|
| 106 |
-
return response.choices[0].message.content
|
| 107 |
-
|
| 108 |
|
| 109 |
|
| 110 |
|
|
@@ -127,20 +150,20 @@ def json_from_text(text):
|
|
| 127 |
|
| 128 |
def generate_topics(user_input, num_topics, previous_queries):
|
| 129 |
prompt = prompt_topics.format(user_input=user_input, num_topics=num_topics)
|
| 130 |
-
response_topics = together_response(prompt, model=
|
| 131 |
subtopics = json_from_text(response_topics)
|
| 132 |
return subtopics
|
| 133 |
|
| 134 |
def generate_subtopics(main_task,user_input,num_topics,excluded_topics):
|
| 135 |
excluded_topics = ",".join(excluded_topics)
|
| 136 |
prompt = prompt_subtopics.format(main_task = main_task,user_input=user_input, num_topics=num_topics, excluded_topics=excluded_topics)
|
| 137 |
-
response_topics = together_response(prompt, model=
|
| 138 |
subtopics = json_from_text(response_topics)
|
| 139 |
return subtopics
|
| 140 |
|
| 141 |
def generate_report(topic, description):
|
| 142 |
prompt = f"""create a detailed report on: {topic} by following the instructions: {description}"""
|
| 143 |
-
md_report = together_response(prompt, model =
|
| 144 |
return md_to_html(md_report)
|
| 145 |
|
| 146 |
def get_images(query, num_results):
|
|
@@ -218,7 +241,7 @@ async def generate_recommendations(input: RecommendationInput):
|
|
| 218 |
|
| 219 |
response_topics = json_from_text(
|
| 220 |
together_response(
|
| 221 |
-
prompt, model=
|
| 222 |
)
|
| 223 |
)
|
| 224 |
return {"recommendations": response_topics}
|
|
|
|
| 88 |
TOPICS TO BE EXCLUDED:{excluded_topics}
|
| 89 |
YOUR OUTPUT SHOULD CONSIST ONLY A VALID PYTHON LIST, DO NOT ADD ADDITIONAL COMMENTS
|
| 90 |
"""
|
| 91 |
+
### ------LLM CONFIG-------- ###

# Together AI client, routed through the Helicone proxy (hconeai.com) so
# requests are logged/observed there; the Helicone-Auth header authenticates
# against Helicone, while api_key authenticates against Together.
together_client = OpenAI(
    api_key=TOGETHER_API_KEY,
    base_url="https://together.hconeai.com/v1",
    default_headers={ "Helicone-Auth": f"Bearer {HELICON_API_KEY}"})

# Groq client, likewise routed through the Helicone proxy.
groq_client = OpenAI(
    api_key=GROQ_API_KEY,
    base_url="https://groq.hconeai.com/openai/v1",
    default_headers={ "Helicone-Auth": f"Bearer {HELICON_API_KEY}"})

# Groq model names (primary provider, tried first by together_response)
llm_default_small = "llama3-8b-8192"
llm_default_medium = "llama3-70b-8192"

# Together Model names (fallback) — used when the Groq call raises
llm_fallback_small = "meta-llama/Llama-3-8b-chat-hf"
llm_fallback_medium = "meta-llama/Llama-3-70b-chat-hf"

### ------END OF LLM CONFIG-------- ###
|
| 112 |
+
|
| 113 |
+
def together_response(message, model=llm_default_small, SysPrompt=SysPromptDefault, temperature=0.2, frequency_penalty=0.1, max_tokens=2000):
    """Run a chat completion on Groq, falling back to Together AI on any error.

    Parameters
    ----------
    message : str
        User message sent as the single "user" turn.
    model : str
        Groq model name to try first (a Together model name also works:
        it is passed through unchanged if the Groq call fails).
    SysPrompt : str
        System prompt for the conversation.
    temperature, frequency_penalty, max_tokens
        Sampling parameters forwarded verbatim to the API.

    Returns
    -------
    str
        The assistant reply text from whichever provider answered.
    """
    messages = [
        {"role": "system", "content": SysPrompt},
        {"role": "user", "content": message},
    ]
    params = {
        "model": model,
        "messages": messages,
        "temperature": temperature,
        "frequency_penalty": frequency_penalty,
        "max_tokens": max_tokens,
    }
    try:
        response = groq_client.chat.completions.create(**params)
        return response.choices[0].message.content
    except Exception:
        # Best-effort fallback: Groq failed (rate limit, outage, ...), retry
        # on Together.  Translate known Groq model names to their Together
        # equivalents; any other name is assumed to already be a Together
        # model and is used as-is.  (The previous logic mapped every
        # non-`llm_default_small` model — including explicit Together names —
        # onto `llm_fallback_medium`.)
        groq_to_together = {
            llm_default_small: llm_fallback_small,
            llm_default_medium: llm_fallback_medium,
        }
        params["model"] = groq_to_together.get(model, model)
        response = together_client.chat.completions.create(**params)
        return response.choices[0].message.content
|
|
|
|
|
|
|
|
|
|
|
|
| 131 |
|
| 132 |
|
| 133 |
|
|
|
|
| 150 |
|
| 151 |
def generate_topics(user_input, num_topics, previous_queries):
    """Ask the LLM for ``num_topics`` topics relevant to ``user_input``.

    ``previous_queries`` is accepted for caller compatibility but is not
    referenced by the current implementation.  Returns the Python list
    parsed out of the model's raw reply.
    """
    query = prompt_topics.format(user_input=user_input, num_topics=num_topics)
    raw_reply = together_response(query, model=llm_default_medium, SysPrompt=SysPromptList, temperature=1)
    return json_from_text(raw_reply)
|
| 156 |
|
| 157 |
def generate_subtopics(main_task, user_input, num_topics, excluded_topics):
    """Generate ``num_topics`` subtopics of ``main_task`` for ``user_input``.

    ``excluded_topics`` (an iterable of strings) is joined into a single
    comma-separated string before substitution into the prompt so the model
    knows which topics to avoid.  Returns the Python list parsed out of the
    model's raw reply.
    """
    avoid = ",".join(excluded_topics)
    query = prompt_subtopics.format(
        main_task=main_task,
        user_input=user_input,
        num_topics=num_topics,
        excluded_topics=avoid,
    )
    raw_reply = together_response(query, model=llm_default_medium, SysPrompt=SysPromptList, temperature=1)
    return json_from_text(raw_reply)
|
| 163 |
|
| 164 |
def generate_report(topic, description):
    """Build a detailed report on ``topic`` per ``description`` and return it as HTML."""
    query = f"create a detailed report on: {topic} by following the instructions: {description}"
    markdown_report = together_response(query, model=llm_default_medium, SysPrompt=SysPromptMdOffline)
    return md_to_html(markdown_report)
|
| 168 |
|
| 169 |
def get_images(query, num_results):
|
|
|
|
| 241 |
|
| 242 |
response_topics = json_from_text(
|
| 243 |
together_response(
|
| 244 |
+
prompt, model=llm_default_small, SysPrompt=SysPromptList,temperature=1
|
| 245 |
)
|
| 246 |
)
|
| 247 |
return {"recommendations": response_topics}
|