| import openai |
| import tiktoken |
|
|
| from application import * |
| from utility import terminal_print |
|
|
# Configure the legacy (pre-1.0) OpenAI client.  `openai_api_key` is not
# defined in this file; presumably it comes from the star-import of
# `application` above — TODO confirm.
openai.api_key = openai_api_key
# Shared tokenizer used by token_count(); cl100k_base is the encoding
# used by the gpt-3.5/gpt-4 model families.
token_encoder = tiktoken.get_encoding("cl100k_base")
|
|
|
|
def request_retry(func):
    """Decorator: retry *func* up to 5 times, waiting 5 seconds between tries.

    Any exception raised by *func* triggers a retry.  If every attempt
    fails, the last exception is re-raised.  (The previous version fell
    off the end of the loop and silently returned ``None`` after the
    final failure, hiding persistent API errors from callers; it also
    slept 5 seconds after the last attempt for no benefit.)

    Parameters
    ----------
    func : callable
        The function to wrap (typically an OpenAI API call).

    Returns
    -------
    callable
        The wrapped function, with metadata preserved via functools.wraps.
    """
    import functools
    import time

    max_retry = 5

    @functools.wraps(func)
    def deco_retry(*args, **kwargs):
        for attempt in range(max_retry):
            try:
                return func(*args, **kwargs)
            except Exception as e:
                # Re-raise on the final attempt instead of returning None.
                if attempt == max_retry - 1:
                    raise
                print(f"Error: {e.__class__.__name__}, retrying in 5 seconds...")
                time.sleep(5)

    return deco_retry
|
|
@terminal_print
def execute_prompt(prompt):
    """Run a single completion request against the text-davinci-003 engine.

    Parameters
    ----------
    prompt : str
        The prompt text sent to the completion endpoint.

    Returns
    -------
    str
        The generated completion text, or an ``<error>`` marker string
        when the API response contains no choices.
    """
    response = openai.Completion.create(
        engine="text-davinci-003",
        prompt=prompt,
        temperature=0,
        max_tokens=500,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0,
    )
    if not response.choices:
        return "<error> failed to generate text</error>"
    return response.choices[0]["text"]
|
|
@terminal_print
def format(**kwargs):
    """Return the ``format`` keyword argument when present, else all kwargs.

    NOTE(review): this function shadows the builtin ``format``; the name
    is kept for backward compatibility with existing callers.
    """
    # dict.get is presence-based, so a falsy "format" value (None, "", 0)
    # is still returned — same behavior as the original membership test.
    return kwargs.get("format", kwargs)
|
|
|
|
@terminal_print
def execute_instruction(article, instruction, model="gpt-3.5-turbo-16k", format="markdown"):
    """Run one chat-completion request over an article.

    Builds a three-message conversation — the article as the system
    message, the caller's instruction, and a formatting directive — and
    returns the model's reply text.

    Parameters
    ----------
    article : str
        Raw text from the article source, sent as the system message.
    instruction : str
        The instruction the model should carry out against the article.
    model : str
        Chat model name (default "gpt-3.5-turbo-16k").
    format : str
        Table output syntax requested from the model (default "markdown").
        NOTE(review): parameter shadows the builtin ``format``; kept for
        caller compatibility.

    Returns
    -------
    str
        The content of the first choice in the chat response.
    """
    messages = [
        {"role": "system", "content": article},
        {"role": "user", "content": instruction},
        {"role": "user", "content": f"Format the table in {format} syntax"},
    ]
    response = openai.ChatCompletion.create(
        model=model,
        messages=messages,
        temperature=0,
    )
    return response["choices"][0]["message"]["content"]
|
|
@terminal_print
def create_inst(article, instructions):
    """Assemble a chat message stream from an article and instructions.

    Parameters
    ----------
    article : str
        Raw article text, placed in a single leading system message.
    instructions : iterable of str
        Instructions, each appended as its own user message in order.

    Returns
    -------
    list of dict
        Message dicts suitable for the ChatCompletion API.
    """
    stream = [{"role": "system", "content": article}]
    stream.extend({"role": "user", "content": text} for text in instructions)
    return stream
|
|
@terminal_print
@request_retry
def send_inst(stream, model="gpt-3.5-turbo-16k", temperature=0):
    """Send a prepared chat message stream and return the reply text.

    Wrapped in ``request_retry``, so transient API failures are retried
    before the call gives up.

    Parameters
    ----------
    stream : list of dict
        Chat messages, e.g. as produced by ``create_inst``.
    model : str
        Chat model name (default "gpt-3.5-turbo-16k").
    temperature : float
        Sampling temperature forwarded to the API (default 0).

    Returns
    -------
    str
        The content of the first choice in the chat response.
    """
    request = {"model": model, "messages": stream, "temperature": temperature}
    response = openai.ChatCompletion.create(**request)
    return response["choices"][0]["message"]["content"]
|
|
def token_count(text):
    """Return the number of cl100k_base tokens in *text*.

    Uses the module-level ``token_encoder`` created at import time.
    """
    tokens = token_encoder.encode(text)
    return len(tokens)
|
|
|
|