# devices/supplier.py
# Author: Roland Ding
# Version 0.0.0.0 -- origin point for the device repo.
# Commit: b426b8e
# Third-party OpenAI client; the legacy (pre-1.0) API surface is used below.
import openai
# Star import supplies openai_api_key from the local application module.
# NOTE(review): prefer an explicit import once the full set of names
# relied on from `application` is confirmed.
from application import *
# Authenticate the shared client once, at module import time.
openai.api_key = openai_api_key
def execute_prompt(prompt):
    '''
    execute_prompt takes one argument: prompt.

    prompt: the prompt for the rational execution it needs to complete
    return: a string, the result of the rational execution, or an
            "<error>" marker string if the API returned no choices
    '''
    # temperature=0 with no sampling penalties keeps output deterministic.
    res = openai.Completion.create(
        engine="text-davinci-003",
        prompt=prompt,
        temperature=0,
        max_tokens=500,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0
    )
    # Guard against an empty choices list instead of raising IndexError.
    return res.choices[0]["text"] if res.choices else "<error> failed to generate text</error>"
def format(**kwargs):
    """Return the value of the "format" keyword when supplied; otherwise
    echo back the whole keyword-argument dict.

    NOTE(review): this name shadows the built-in ``format``; it is kept
    unchanged for backward compatibility with existing callers.
    """
    return kwargs.get("format", kwargs)
def execute_instruction(article, instruction, model="gpt-3.5-turbo-16k", format="markdown"):
    '''
    execute_instruction takes four arguments: article, instruction, model and format.

    article: the raw text from the article source
    instruction: the instruction for the rational execution it needs to complete
    model: the model used for the rational execution, default to gpt-3.5-turbo-16k
    format: the format of the table, default to markdown
    return: a string, the result of the rational execution
    '''
    # The article is sent as the system message; the instruction and the
    # formatting directive follow as user messages.
    msg_stream = [
        {"role": "system", "content": article},
        {"role": "user", "content": instruction},
        {"role": "user", "content": f"Format the table in {format} syntax"},
    ]
    # temperature=0 keeps the completion deterministic.
    res = openai.ChatCompletion.create(
        model=model,
        messages=msg_stream,
        temperature=0)
    return res["choices"][0]["message"]["content"]
def create_inst(article, instructions):
    """Build a chat message stream from an article and its instructions.

    article: raw source text, placed in the "system" role
    instructions: iterable of instruction strings, one "user" message each
    return: a list of {"role", "content"} dicts ready for the chat API
    """
    system_msg = {"role": "system", "content": article}
    user_msgs = [{"role": "user", "content": inst} for inst in instructions]
    return [system_msg, *user_msgs]
def send_inst(stream, model="gpt-3.5-turbo-16k", temperature=0):
    """Send a prepared message stream to the OpenAI chat endpoint.

    stream: list of chat messages (e.g. built by create_inst)
    model: chat model name, default gpt-3.5-turbo-16k
    temperature: sampling temperature, default 0 (deterministic)
    return: the content string of the first returned choice
    """
    response = openai.ChatCompletion.create(
        model=model,
        messages=stream,
        temperature=temperature)
    first_choice = response["choices"][0]
    return first_choice["message"]["content"]