psaegert's picture
Upload 201 files
2c34d2f
rganization = config['openai']['organization_id'].strip('"').strip("'")
ENGINE = config['openai']['engine'].strip('"').strip("'")
prompt_config = {
'engine': ENGINE,
'temperature': TEMPERATURE,
'max_tokens': MAX_TOKENS,
'shell': SHELL,
'multi_turn': MULTI_TURN,
'token_count': 0
}
return PromptFile(PROMPT_CONTEXT.name, prompt_config)
def is_sensitive_content(content: str):
    """
    Check if the content contains sensitive content
    Refer to https://beta.openai.com/docs/engines/content-filter for explanation
    of the labels ("0" safe, "1" sensitive, "2" unsafe) and the recommended
    confidence handling implemented below.
    """
    # Empty input cannot be sensitive; also avoids a pointless API call.
    if len(content) == 0:
        return False

    # Query OpenAI's (legacy) content-filter model for a single-token label.
    # temperature=0 / top_p=0 make the answer deterministic; logprobs=10
    # exposes the confidence of alternative labels for the threshold logic below.
    response = openai.Completion.create(
        engine="content-filter-alpha",
        prompt = "<|endoftext|>"+content+"\n--\nLabel:",
        temperature=0,
        max_tokens=1,
        top_p=0,
        logprobs=10
    )

    # The raw label token emitted by the model ("0", "1", or "2").
    output_label = response["choices"][0]["text"]

    # This is the probability (log-probability) at which we evaluate that a
    # "2" is likely real vs. should be discarded as a false positive.
    toxic_threshold = -0.355

    if output_label == "2":
        # If the model returns "2", inspect its confidence in "2" and the
        # other candidate labels for the first (only) generated token.
        logprobs = response["choices"][0]["logprobs"]["top_logprobs"][0]

        # If the model is not sufficiently confident in "2",
        # choose the most probable of "0" or "1".
        # Guaranteed to have a confidence for "2" since this was the selected token.
        if logprobs["2"] < toxic_threshold:
            logprob_0 = logprobs.get("0", None)
            logprob_1 = logprobs.get("1", None)

            # If both "0" and "1" have probabilities, set the output label
            # to whichever is most probable.
            if logprob_0 is not None and logprob_1 is not None:
                if logprob_0 >= logprob_1:
                    output_label = "0"
                else:
                    output_label = "1"