Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -4,48 +4,39 @@ import os
|
|
| 4 |
|
| 5 |
openai.api_key = os.environ["OpenAPI_Key"]
|
| 6 |
|
| 7 |
-
manual = "uitleg"
|
| 8 |
-
|
| 9 |
-
# Function to make API call
|
| 10 |
def api_call(messages, temperature=0.5, model="gpt-3.5-turbo"):
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 16 |
|
| 17 |
def chunk_HTMLs(text, delimiter="HTML (ID:"):
|
| 18 |
-
""" Splits the text into chunks based on a delimiter, extracts the ID, and stores chunks in a dictionary. """
|
| 19 |
HTMLs = text.split(delimiter)
|
| 20 |
-
HTMLs = HTMLs[1:] if HTMLs[0] else HTMLs
|
| 21 |
-
|
| 22 |
html_dict = {}
|
| 23 |
for html in HTMLs:
|
| 24 |
-
end_id_index = html.find(")")
|
| 25 |
if end_id_index != -1:
|
| 26 |
-
html_id = html[:end_id_index].strip()
|
| 27 |
-
html_content = html[end_id_index+1:].strip()
|
| 28 |
-
full_html_id = "HTML: (ID:" + html_id + ")"
|
| 29 |
-
html_dict[full_html_id] = delimiter + "HTML (ID:" + html_id + ") " + html_content
|
| 30 |
-
|
| 31 |
return html_dict
|
| 32 |
-
|
| 33 |
-
def process_course(input_text, zorgvrager = "cliënt", zorgprofessional = "begeleider"):
|
| 34 |
-
""" Processes input text, extracts HTML chunks, analyzes each using an OpenAI model,
|
| 35 |
-
and conducts a second, separate analysis on each qualifying chunk. """
|
| 36 |
-
stored_HTMLs = chunk_HTMLs(input_text)
|
| 37 |
|
|
|
|
|
|
|
| 38 |
grouped_results = {}
|
| 39 |
-
last_html_id = None # Track the last processed HTML ID
|
| 40 |
for html_id, html_content in stored_HTMLs.items():
|
| 41 |
messages = [
|
| 42 |
-
{"role": "system", "content":
|
| 43 |
-
Oordeel: het perspectief is altijd zorgverlener.
|
| 44 |
-
Oordeel: het perspectief is deels zorgvrager."""},
|
| 45 |
{"role": "user", "content": html_content}
|
| 46 |
]
|
| 47 |
oordeel = api_call(messages, 0.5, "gpt-3.5-turbo")
|
| 48 |
-
|
| 49 |
if html_id not in grouped_results:
|
| 50 |
grouped_results[html_id] = [oordeel]
|
| 51 |
else:
|
|
@@ -53,26 +44,21 @@ Oordeel: het perspectief is deels zorgvrager."""},
|
|
| 53 |
|
| 54 |
if "deels zorgvrager" in oordeel:
|
| 55 |
follow_up_messages = [
|
| 56 |
-
{"role": "system", "content":
|
| 57 |
-
Citaat: [citaat van zin(nen) met foute doelgroep]
|
| 58 |
-
Correctie: [gecorrigeerde zin(nen)])
|
| 59 |
-
Verander hierbij zo min mogelijk aan de rest van de zin, verander alleen de doelgroep. Gebruik bijvoorbeeld net als in de bronteksten altijd 'je' in plaats van 'u'. Als je de zorgvrager expliciet wilt benoemen, gebruik dan de term {0}; als je de zorgverlener expliciet wilt benoemen, gebruik dan de term {1}.""".format(zorgvrager, zorgprofessional)},
|
| 60 |
{"role": "user", "content": html_content}
|
| 61 |
]
|
| 62 |
-
follow_up_result = api_call(follow_up_messages, 0.7, "gpt-4-turbo
|
| 63 |
grouped_results[html_id].append(follow_up_result)
|
| 64 |
-
|
| 65 |
return grouped_results
|
| 66 |
|
| 67 |
-
|
| 68 |
def display_results(grouped_results):
|
|
|
|
| 69 |
for html_id, results in grouped_results.items():
|
| 70 |
-
markdown_output += f"###
|
| 71 |
for result in results:
|
| 72 |
markdown_output += f"- {result}\n"
|
| 73 |
-
markdown_output += "\n---\n"
|
| 74 |
return markdown_output
|
| 75 |
-
|
| 76 |
|
| 77 |
inputs = [
|
| 78 |
gr.Textbox(lines=7, label="Input Text"),
|
|
@@ -83,10 +69,9 @@ outputs = gr.Markdown(label="Result", show_copy_button=True)
|
|
| 83 |
|
| 84 |
def interface_function(input_text, zorgvrager="cliënt", zorgprofessional="begeleider"):
|
| 85 |
results = process_course(input_text, zorgvrager, zorgprofessional)
|
| 86 |
-
markdown_output = display_results(results)
|
| 87 |
return markdown_output
|
| 88 |
|
| 89 |
-
# Create the Gradio interface with HTML-formatted output
|
| 90 |
iface = gr.Interface(
|
| 91 |
fn=interface_function,
|
| 92 |
inputs=inputs,
|
|
@@ -95,4 +80,4 @@ iface = gr.Interface(
|
|
| 95 |
description="Voer de tekst van een module in en druk op 'Submit'."
|
| 96 |
)
|
| 97 |
|
| 98 |
-
iface.launch
|
|
|
|
| 4 |
|
| 5 |
openai.api_key = os.environ["OpenAPI_Key"]
|
| 6 |
|
|
|
|
|
|
|
|
|
|
| 7 |
def api_call(messages, temperature=0.5, model="gpt-3.5-turbo"):
    """Send *messages* to the OpenAI chat-completion endpoint.

    Parameters
    ----------
    messages : list of {"role": ..., "content": ...} dicts.
    temperature : sampling temperature forwarded to the API.
    model : model name forwarded to the API.

    Returns the assistant reply text on success; on any failure the
    exception is swallowed and an ``"API call failed: ..."`` string is
    returned instead, so callers always receive a string.
    """
    try:
        completion = openai.ChatCompletion.create(
            model=model,
            temperature=temperature,
            messages=messages,
        )
        return completion.choices[0].message['content']
    except Exception as exc:
        # Deliberate best-effort: downstream code inspects the returned
        # string rather than handling exceptions.
        return f"API call failed: {str(exc)}"
|
| 17 |
|
| 18 |
def chunk_HTMLs(text, delimiter="HTML (ID:"):
    """Split *text* into HTML chunks keyed by their ID.

    Chunks are introduced by ``delimiter`` followed by an ID and a closing
    parenthesis, e.g. ``HTML (ID:42) <p>...</p>``.

    Parameters
    ----------
    text : the full course text to split.
    delimiter : marker that starts each chunk.

    Returns a dict mapping ``"HTML: (ID:<id>)"`` to the reconstructed
    chunk text (``"HTML (ID:<id>) <content>"``).
    """
    HTMLs = text.split(delimiter)
    # Drop any preamble that precedes the first delimiter; if the text
    # starts with the delimiter the empty head is skipped below anyway.
    HTMLs = HTMLs[1:] if HTMLs[0] else HTMLs
    html_dict = {}
    for html in HTMLs:
        end_id_index = html.find(")")
        if end_id_index != -1:
            html_id = html[:end_id_index].strip()
            html_content = html[end_id_index + 1:].strip()
            full_html_id = "HTML: (ID:" + html_id + ")"
            # Fix: the original prepended the delimiter twice, producing
            # values like "HTML (ID:HTML (ID:<id>) ..."; store it once.
            html_dict[full_html_id] = delimiter + html_id + ") " + html_content
    return html_dict
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 30 |
|
| 31 |
+
def process_course(input_text, zorgvrager="cliënt", zorgprofessional="begeleider"):
    """Analyse every HTML chunk of a course text with the OpenAI API.

    Each chunk is first judged on its perspective with gpt-3.5-turbo; a
    chunk whose judgement contains "deels zorgvrager" gets a second,
    corrective analysis with gpt-4-turbo.

    Parameters
    ----------
    input_text : full course text, split via :func:`chunk_HTMLs`.
    zorgvrager : term to use for the care receiver in corrections.
    zorgprofessional : term to use for the care professional in corrections.

    Returns a dict mapping HTML IDs to the list of model outputs for
    that chunk.
    """
    stored_HTMLs = chunk_HTMLs(input_text)
    grouped_results = {}
    for html_id, html_content in stored_HTMLs.items():
        # NOTE(review): the system prompt is truncated in the available
        # source ("Analyseer het perspectief..."); restore the full Dutch
        # instruction text before deploying.
        messages = [
            {"role": "system", "content": "Analyseer het perspectief..."},
            {"role": "user", "content": html_content},
        ]
        oordeel = api_call(messages, 0.5, "gpt-3.5-turbo")
        if html_id not in grouped_results:
            grouped_results[html_id] = [oordeel]
        else:
            # Reconstructed branch (line lost in the source diff): collect
            # repeated judgements under the same HTML ID — TODO confirm.
            grouped_results[html_id].append(oordeel)
        if "deels zorgvrager" in oordeel:
            # NOTE(review): this prompt is also elided in the source; it
            # must instruct the model to use the {zorgvrager} and
            # {zorgprofessional} terms — restore before deploying.
            follow_up_messages = [
                {"role": "system", "content": f"Analyseer en (...), gebruik dan de term {zorgvrager}; als je de zorgverlener expliciet wilt benoemen, gebruik dan de term {zorgprofessional}."},
                {"role": "user", "content": html_content},
            ]
            follow_up_result = api_call(follow_up_messages, 0.7, "gpt-4-turbo")
            grouped_results[html_id].append(follow_up_result)
    return grouped_results
|
| 53 |
|
|
|
|
| 54 |
def display_results(grouped_results):
    """Render grouped judgement results as a Markdown string.

    Each HTML ID becomes a ``###`` heading followed by one bullet per
    result and a ``---`` horizontal rule separator.
    """
    sections = []
    for html_id, results in grouped_results.items():
        body = [f"### {html_id}"]
        body.extend(f"- {result}" for result in results)
        sections.append("\n".join(body) + "\n\n---\n")
    return "".join(sections)
|
|
|
|
| 62 |
|
| 63 |
inputs = [
|
| 64 |
gr.Textbox(lines=7, label="Input Text"),
|
|
|
|
| 69 |
|
| 70 |
def interface_function(input_text, zorgvrager="cliënt", zorgprofessional="begeleider"):
    """Gradio entry point: analyse *input_text* and return Markdown output."""
    return display_results(process_course(input_text, zorgvrager, zorgprofessional))
|
| 74 |
|
|
|
|
| 75 |
iface = gr.Interface(
|
| 76 |
fn=interface_function,
|
| 77 |
inputs=inputs,
|
|
|
|
| 80 |
description="Voer de tekst van een module in en druk op 'Submit'."
|
| 81 |
)
|
| 82 |
|
| 83 |
+
# Start the Gradio server. The original line read `iface.launch` — an
# attribute reference that was never called — so the app never launched.
iface.launch()
|