Update prompt_templates.py
prompt_templates.py  CHANGED  (+70 -70)
@@ -1,70 +1,70 @@
 from imports import *
 from prompts import *
 
 
-os.environ['HUGGINGFACEHUB_API_TOKEN'] =
-os.environ['OPENAI_API_KEY'] =
+os.environ['HUGGINGFACEHUB_API_TOKEN'] = os.getenv('HF')
+os.environ['OPENAI_API_KEY'] = os.getenv('OpenAi')
 # ENV_HOST = "https://cloud.langfuse.com"
 # ENV_SECRET_KEY = userdata.get('LF-SECRET')
 # ENV_PUBLIC_KEY = userdata.get('LF-PUBLIC')
 
 system_prompt_classifier = PromptTemplate(
     input_variables=["QUERY", "PREV_QUERY"], template=system_prompt_classifier_template,
     output_key="cassification"
 )
 
 system_memory_prompt = PromptTemplate(
     input_variables=["QUERY", "API_LIST", "RAG", "PAST_QUERY", "PAST_RESPONSE"], template=system_prompt_memory_template
 )
 
 system_prompt = PromptTemplate(
     input_variables=["QUERY", "API_LIST", "RAG"], template=system_prompt_template
 )
 
 follow_up_prompt = PromptTemplate(
     input_variables=["QUERY", "chat_history"], template=follow_up_prompt_template
 )
 generation_prompt = PromptTemplate(
     input_variables=["API_LIST", "FEW_SHOT", "MODIFIED_ARG"], template=generation_prompt_template
 )
 
 memory = ConversationBufferWindowMemory(
     memory_key="chat_history", input_key="QUERY", k=1,
     return_messages=True
 )
 llm_gpt_3_5 = ChatOpenAI(temperature=0.0, model="gpt-3.5-turbo-1106")
 llm_gpt_4 = ChatOpenAI(temperature=0.0, model="gpt-4")
 
 mem_chain = LLMChain(llm=llm_gpt_3_5,
                      prompt=system_prompt_classifier,
                      memory=memory,
                      verbose=True)
 
 query_chain_memory = LLMChain(llm=llm_gpt_3_5,
                               prompt=system_memory_prompt,
                               memory=memory,
                               verbose=True)
 
 query_chain = LLMChain(llm=llm_gpt_3_5,
                        prompt=system_prompt,
                        memory=memory,
                        verbose=True)
 
 format_chain = LLMChain(llm=llm_gpt_3_5,
                         prompt=follow_up_prompt,
                         memory=memory,
                         verbose=True)
 
 generation_chain = LLMChain(llm=ChatOpenAI(temperature=0.7, model="gpt-4"),
                             prompt=generation_prompt,
                             output_key='QUERY',
                             verbose=False)
 reprompt = PromptTemplate(
     input_variables=["QUERY", "API_LIST", "CORRECTION_PROMPT", "chat_history"], template=reprompt_template
 )
 
 reprompt_chain = LLMChain(llm=llm_gpt_4,
                           prompt=reprompt,
                           output_key='new_response',
                           memory=memory,
                           verbose=True)
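The only substantive change is that the two API-token assignments, previously left blank, now read from the `HF` and `OpenAi` environment variables. Below is a minimal sketch of how the updated module might be exercised; it is not part of the commit and assumes the Space defines those two secrets and that `prompts.py` supplies the template strings referenced above. One caveat worth guarding against: `os.getenv` returns `None` for a missing secret, and assigning `None` into `os.environ` raises a `TypeError`, so the sketch checks the secrets before importing the module.

# driver.py -- hypothetical usage sketch, not part of the commit
import os

# os.getenv returns None when a secret is unset, and os.environ values
# must be strings, so prompt_templates.py would crash at import time
# with a TypeError; fail with a clearer message instead.
for secret in ("HF", "OpenAi"):
    if os.getenv(secret) is None:
        raise RuntimeError(f"Space secret '{secret}' is not set")

# The chains are constructed at module import time.
from prompt_templates import query_chain

# system_prompt expects QUERY, API_LIST and RAG; the k=1 window memory
# records each turn under chat_history, keyed on the QUERY input.
result = query_chain(
    {
        "QUERY": "List my invoices",                  # hypothetical user query
        "API_LIST": "GET /invoices, POST /invoices",  # hypothetical API catalog
        "RAG": "",                                    # retrieved context, empty here
    }
)
print(result["text"])  # "text" is LLMChain's default output key

Calling the chain with `__call__` (or `invoke` in newer LangChain releases) returns a dict, so the response is read from the default `text` output key; only `reprompt_chain` and `generation_chain` override it, with `new_response` and `QUERY` respectively.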