"""supplier.py — OpenAI request helpers (completion, chat, token counting).

Author: Roland Ding
Version: 9.9.22.67 mass update of the application
Commit: bb32635
"""
import functools
import time

import openai
import tiktoken

from application import *
from utility import terminal_print
# Configure the OpenAI client; `openai_api_key` presumably comes from the
# star import of `application` above — TODO confirm it is defined there.
openai.api_key = openai_api_key
# Shared tokenizer used by token_count(); cl100k_base is the encoding used
# by the gpt-3.5 / gpt-4 model families.
token_encoder = tiktoken.get_encoding("cl100k_base")
def request_retry(func):
    """Decorator: retry *func* up to ``max_retry`` times on any exception.

    Sleeps 5 seconds between attempts.  Fixes over the previous version:
    - uses ``functools.wraps`` so the wrapped function keeps its name,
      docstring, and signature metadata (important when stacked with other
      decorators such as ``terminal_print``);
    - re-raises the last exception once all retries are exhausted instead
      of silently returning ``None``, so callers can tell failure apart
      from a legitimate ``None`` result;
    - hoists ``import time`` to module level instead of importing on
      every call.
    """
    max_retry = 5

    @functools.wraps(func)
    def deco_retry(*args, **kwargs):
        last_exc = None
        for _attempt in range(max_retry):
            try:
                return func(*args, **kwargs)
            # Broad catch is deliberate: network/API errors come in many
            # classes and all of them should trigger a retry.
            except Exception as e:
                last_exc = e
                print(f"Error: {e.__class__.__name__}, retrying in 5 seconds...")
                time.sleep(5)
        raise last_exc

    return deco_retry
@terminal_print
def execute_prompt(prompt): # need revision
    '''
    Send *prompt* to the legacy OpenAI Completions API (text-davinci-003)
    and return the generated text.

    Parameters
    ----------
    prompt: str
        The full prompt to complete.  (The previous docstring claimed a
        separate ``text`` argument; only ``prompt`` is accepted.)

    Returns
    -------
    str: the first completion's text, or the literal marker string
    "<error> failed to generate text</error>" when the API returned no
    choices.
    '''
    # temperature=0 / top_p=1 make the output as deterministic as the API
    # allows; max_tokens caps the length of the generated completion.
    res = openai.Completion.create(
        engine="text-davinci-003",
        prompt=prompt,
        temperature=0,
        max_tokens=500,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0
    )
    return res.choices[0]["text"] if res.choices else "<error> failed to generate text</error>"
@terminal_print
def format(**kwargs): # need revision
    """Return ``kwargs["format"]`` when that key is present, otherwise the
    whole keyword-argument dict unchanged.

    NOTE(review): this name shadows the builtin ``format``; renaming would
    break existing callers, so it is kept as-is.
    """
    # Presence check, not truthiness: a falsy "format" value is still returned.
    return kwargs.get("format", kwargs)
@terminal_print
def execute_instruction(article, instruction, model="gpt-3.5-turbo-16k", format="markdown"): # need revision
    '''
    Run one chat completion: the article as the system message, the
    instruction and a table-formatting request as user messages.

    Parameters
    ----------
    article: the raw text from the article source
    instruction: the instruction the model needs to complete
    model: the chat model name to use
    format: the table syntax to request (e.g. "markdown")

    Returns
    -------
    str: the content of the first choice's message.
    '''
    messages = [
        {"role": "system", "content": article},
        {"role": "user", "content": instruction},
        {"role": "user", "content": f"Format the table in {format} syntax"},
    ]
    # temperature=0 for maximally deterministic output.
    response = openai.ChatCompletion.create(
        model=model,
        messages=messages,
        temperature=0,
    )
    return response["choices"][0]["message"]["content"]
@terminal_print
def create_inst(article, instructions): # need revision
    """Build a chat message stream: one system message carrying the article,
    followed by one user message per instruction, in order."""
    stream = [{"role": "system", "content": article}]
    stream.extend({"role": "user", "content": inst} for inst in instructions)
    return stream
@terminal_print
@request_retry
def send_inst(stream, model="gpt-3.5-turbo-16k", temperature=0): # need revision to change to async method
    """Send a prepared chat message stream to the OpenAI chat API and return
    the content of the first choice's message.  Retries on failure via the
    ``request_retry`` decorator."""
    response = openai.ChatCompletion.create(
        model=model,
        messages=stream,
        temperature=temperature,
    )
    return response["choices"][0]["message"]["content"]
def token_count(text):
    """Return the number of tokens *text* encodes to under the module-level
    ``token_encoder`` (cl100k_base)."""
    tokens = token_encoder.encode(text)
    return len(tokens)