data-translation-experiments / process_data.py
rosemariafontana's picture
Update process_data.py
5fb36e9 verified
raw
history blame
37.7 kB
import json
import os
from enum import Enum
from typing import Any, Dict, List, Literal, Optional, Union

import gradio as gr
from gradio_toggle import Toggle
from openai import OpenAI
from pydantic import BaseModel, Field, ValidationError, validator

from schema_classes import (
    FarmActivities,
    FarmActivitiesLite,
    Interactions,
    InteractionsLite,
    Log,
    Person,
    PlantingLite,
    Soil,
    Treatment,
    Trial,
    TrialLite,
    Yield,
)
# This API key must be in a "secret" in your environment. This is generated from OpenAI or the company's website that creates the model you wish to engage with.
# To use other models, some other endpoints would need to slightly change
# As is, the endpoint used requires a model that is capable of OpenAI's structured outputs.
# Resolve the API key once and fail fast with a clear message when it is
# missing; assigning None into os.environ raises an opaque TypeError.
_openai_api_key = os.getenv("OPENAI_API_KEY")
if not _openai_api_key:
    raise RuntimeError("The OPENAI_API_KEY environment variable (secret) must be set.")
os.environ["OPENAI_API_KEY"] = _openai_api_key
client = OpenAI()
# What the survey should look like:
# Do you want to enter your text in one big block (free-form style) or in individual pieces (form-based style)? ###
# Free-form style means that a single JSON will be created from the block of text. This JSON will have a single level of nesting that was created by the model.
# Form-based style means that individual JSON pieces will be created from different pieces of text. You will have a few more prompts to enter in this case. These JSON pieces will be manually combined with code. This JSON will also have a single level of nesting, which was manually created.
# Parse either one big block of text conditionally or individual pieces conditionally
# values = big-block-input-text, individual-pieces-input-text
# actual long text value = onelonginputtext
# What model version do you want to process your input data? ###
# Parse ['gpt-4o-mini-2024-07-18', 'gpt-4o-2024-08-06']
# Do you want to pre-process your data? We will feed your data to a conversational model prior to creating the schema, with a prompt of your choosing.
# Parse ['yes', 'no']
# Do you want to enter multiple pre-prompts? Or will you only be entering one? You can enter up to three separate pre-prompts at this time.
# Parse ['yes', 'no']
# Do you have a specific pre-processing style in mind? This is just for data collection purposes. (Allow to pick multiple)
# Not specified means you just want to put text in and it doesn't fit the other categories
# Summarization means you're going to ask the model to produce some sort of summary as your pre-processing step.
# Specific field extraction means you're going to ask the models to extract some details as your pre-processing step.
# Parse possibly multiple values ['not_specified', 'summarization', 'specific-field-extraction']
# Parse [1-3 pre_processing_prompts text boxes]
# Now it is time to enter your prompts. The number of prompts will be directly related to which input data form you've chosen.
# This is the input data that will be parsed with this prompt:
# Input data here ###
# This is a sample prompt that you can choose or create your own:
# These options if free-form with same text for each
# Schema 1 prompts:
#farm_prompt = "Extract the farm information."
# Schema 2 prompts:
#interactions_prompt = "Extract the interactions information."
# Schema 3 prompts:
#trial_prompt = "Extract the trial information."
# Schema 1 prompts:
#field_prompt = "Extract the field information."
#plant_prompt = "Extract the planting information."
#log_prompt = "Extract the log information."
#soil_prompt = "Extract the soil information."
#yield_prompt = "Extract the yield information."
# Schema 2 prompts:
#interaction_prompt = "Extract the interaction information"
#person_prompt = "Please provide a list of people involved in this interaction, with each person's name, role, and any other relevant details."
# Schema 3 prompts:
#trial_prompt = "Extract the trial information"
#treatment_prompt = "Please provide a list of different treatments (strips or blocks with the same conditions applied) performed by the partner."
# Parameters - The Levers and Flippers to be chosen
# Use this for reference for now then work backwards
# Otter.ai-summary means that you've already pre-processed your input data using otter.ai and you don't ##### in this event it's just confusing don't include
#all_options = {
# 'model_version': ['gpt-4o-mini-2024-07-18 (Smaller version for faster responses)', 'gpt-4o-2024-08-06 (Latest GPT model with structured outputs)'],
# 'input_modality': ['free-text-input / single-JSON-creation (model creates entire JSON) / single-level-nested', 'form-text-input / stepwise-JSON-creation (individual pieces created then manually combined) / no-nesting (flat schema)'],
# 'pre_processing': ['yes', 'no'],
# 'pre_processing_multiple': ['yes', 'no'],
# 'pre_processing_specification': ['not_specified', 'summarization', 'specific-field-extraction'],
# 'prompting_style': ['no_specific_style', 'example_driven', 'role_specific', 'step_by_step', 'error_detection'],
# 'input_text' = ['whole_input_text': "value", 'input_text_pieces': ["piece_1": "value", "piece_2": "value"]],
# 'pre_processing_prompt' = ['pre_processing_prompts': ["prompt_1": "value", "prompt_2": "value"]],
# 'prompt' = ['prompts': ["prompt1": "value", "prompt2", "value"]]
#}
def generate_json(input_data, parameters):
    """
    Prompt the OpenAI API to produce the three structured JSON outputs
    (farm activities, interactions, trial) from one block of input text.

    Args:
        input_data: (dict) The input data, preprocessed, from the user.
            input_data["input_text"] = the preprocessed input text
            input_data["input_context"] = depending on levers, empty or what is put in front of the prompt
        parameters: (dict) All of the individual parameters and "flippers".
            parameters["model_version"] = (str) what model should be used
            parameters["combined_pre_prompt"] = (str or None) concatenated
                individual pre-prompts, prepended to every extraction prompt
    Returns:
        3 processed data-filled JSON strings
        (farm_pretty_json, interactions_pretty_json, trial_pretty_json),
        or a single {"error": ...} dict when a call fails.
    """
    print("Generating JSON Whole!")
    user_text = input_data["input_text"]
    model_name = parameters["model_version"]
    # When a combined pre-prompt exists it is prepended to every prompt.
    prefix = parameters["combined_pre_prompt"] or ""
    farm_prompt = prefix + "Extract the farm information."
    interactions_prompt = prefix + "Extract the interactions information."
    trial_prompt = prefix + "Extract the trial information."

    def _extract(system_prompt, schema, label):
        # One structured-output call; returns the parsed model as a JSON string.
        response = client.beta.chat.completions.parse(
            model=model_name,  # Use GPT model that supports structured output
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_text},
            ],
            response_format=schema,
        )
        parsed = response.choices[0].message.parsed
        print(label)
        print(parsed)  # debugging
        return parsed.json()

    try:
        farm_pretty_json = _extract(farm_prompt, FarmActivities, "FARM JSON: ")
        interactions_pretty_json = _extract(interactions_prompt, Interactions, "INTERACTIONS JSON: ")
        trial_pretty_json = _extract(trial_prompt, Trial, "TRIALS JSON: ")
        return farm_pretty_json, interactions_pretty_json, trial_pretty_json
    except ValidationError as e:
        return {"error": str(e)}
    except Exception as e:
        return {"error": "Failed to generate valid JSON. " + str(e)}
# This is for the step-wise JSON creation
def generate_json_pieces(input_data, parameters):
    """
    Build the three output schemas stepwise: each sub-section (field, planting,
    log, soil, yield, interaction, person, trial, treatment) is parsed with its
    own structured-output call, then the pieces are combined manually into the
    nested schemas. The idea is that small, focused calls may be more robust
    than giving the model all the data at once.

    Args:
        input_data: (dict) The input data, preprocessed, from the user.
            input_data["input_text"] = (str) the preprocessed input text
            input_data["input_text_pieces"] = (dict) the individual split up text
                pieces (field_data_input, planting_data_input, ...), optionally
                containing a "pre_processed_pieces" sub-dict from pre_processing()
        parameters: (dict) All of the individual parameters and "flippers".
            parameters["model_version"] = (str) what model should be used
            parameters["pre_prompt"] = (bool) whether pre-processing ran
            parameters["chaining"] = (bool) whether pre-processed pieces were chained
            parameters["combined_pre_prompt"] = (str or None) concatenated pre-prompts
    Returns:
        (str, str, str) pretty-printed JSON strings for Farm Activities,
        Interactions and Trials, or a single {"error": ...} dict on failure.
    """
    print("Generating JSON Pieces!")
    print("INPUT DATA")
    print(input_data)
    print("PARAMS")
    print(parameters)
    model_version = parameters["model_version"]
    pieces = input_data["input_text_pieces"]
    if parameters["pre_prompt"] and parameters["chaining"]:
        print("Pre prompt is true")
        # Prefer the pre-processed text, but fall back to the raw piece for any
        # label pre_processing() skipped (e.g. because the piece was empty) --
        # the original indexed pre_processed_pieces directly and could KeyError.
        pre_processed = pieces.get("pre_processed_pieces", {})

        def _piece(label):
            return pre_processed.get(label, pieces[label])
    else:
        print("Pre prompt is false")

        def _piece(label):
            return pieces[label]
    # Every extraction prompt gets the combined pre-prompt (if any) prepended.
    prefix = parameters["combined_pre_prompt"] or ""
    field_prompt = prefix + "Extract the field information."
    plant_prompt = prefix + "Extract the planting information."
    log_prompt = prefix + "Extract the log information."
    soil_prompt = prefix + "Extract the soil information."
    yield_prompt = prefix + "Extract the yield information."
    interaction_prompt = prefix + "Extract the interaction information"
    person_prompt = prefix + "Please provide a list of people involved in this interaction, with each person's name, role, and any other relevant details."
    trial_prompt = prefix + "Extract the trial information"
    treatment_prompt = prefix + "Please provide a list of different treatments (strips or blocks with the same conditions applied) performed by the partner."

    def _parse_piece(system_prompt, user_text, schema, label):
        # One structured-output call per schema piece; returns a plain dict.
        print(label + " prompt: " + system_prompt)
        response = client.beta.chat.completions.parse(
            model=model_version,  # Use GPT model that supports structured output
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_text},
            ],
            response_format=schema,
        )
        parsed = response.choices[0].message.parsed
        result = parsed.dict()
        print(label + " JSON: ")
        print(result)  # debugging
        return result

    try:
        # Schema 1: farm activities (field + plantings with logs/soil/yield).
        field_json = _parse_piece(field_prompt, _piece("field_data_input"), FarmActivitiesLite, "FIELD")
        plant_json = _parse_piece(plant_prompt, _piece("planting_data_input"), PlantingLite, "PLANT")
        log_json = _parse_piece(log_prompt, _piece("log_data_input"), Log, "LOG")
        soil_json = _parse_piece(soil_prompt, _piece("soil_data_input"), Soil, "SOIL")
        yield_json = _parse_piece(yield_prompt, _piece("yield_data_input"), Yield, "YIELD")
        plantings = {
            **plant_json,
            "logs": log_json,
            "soil": soil_json,
            "yield_": yield_json,
        }
        farm_activities = {**field_json, "plantings": plantings}
        final_pretty_farm_activity_json = json.dumps(farm_activities, indent=4)
        print("FINAL FARM JSON: ")
        print(final_pretty_farm_activity_json)
        # Schema 2: interactions (interaction + people).
        interaction_json = _parse_piece(interaction_prompt, _piece("interaction_data_input"), InteractionsLite, "INTERACTION")
        # BUG FIX: the original stored this response in `interaction_response`
        # but then read the undefined name `person_response` (NameError), and
        # later embedded the raw pydantic model (not JSON-serializable) in the
        # combined dict instead of its dict() form.
        person_json = _parse_piece(person_prompt, _piece("person_data_input"), Person, "PERSON")
        interactions = {**interaction_json, "people": person_json}
        final_pretty_interactions_json = json.dumps(interactions, indent=4)
        print("FINAL INTERACTIONS JSON: ")
        print(final_pretty_interactions_json)
        # Schema 3: trials (trial + treatments).
        trial_json = _parse_piece(trial_prompt, _piece("trial_data_input"), TrialLite, "TRIAL")
        treatment_json = _parse_piece(treatment_prompt, _piece("treatment_data_input"), Treatment, "TREATMENT")
        # BUG FIX: embed the dict form here too (original used the raw model).
        trials = {**trial_json, "treatments": treatment_json}
        final_pretty_trials_json = json.dumps(trials, indent=4)
        print("FINAL TRIALS JSON: ")
        print(final_pretty_trials_json)
        return final_pretty_farm_activity_json, final_pretty_interactions_json, final_pretty_trials_json
    except Exception as e:
        return {"error": "Failed to generate valid JSON. " + str(e)}
def pre_processing(input_data, parameters):
    """
    In the event there's a pre-prompt, run the pre-prompts over the input text
    (or text pieces) with a conversational model before schema generation.

    Args:
        input_data: (dict) The input data from the user.
            input_data["stepwise_json_creation"] = (list) first element is
                "stepwisejsoncreation" or "singlejsoncreation"
            input_data["input_text"] = (str) the whole input text
            input_data["input_text_pieces"] = (dict) the individual text pieces
        parameters: (dict) All of the individual parameters and "flippers".
            parameters["model_version"] = (str) what model should be used
            parameters["chaining"] = (bool) whether each pre-prompt's output is
                fed into the next model call (vs. one combined pre-prompt later)
            parameters["context_pre_prompt"], parameters["summary_pre_prompt"],
            parameters["conversation_pre_prompt"], parameters["example_pre_prompt"]
                = (str or None) the individual pre-prompts
    Returns:
        (dict) input_data, updated in place:
            - stepwise mode: input_data["input_text_pieces"]["pre_processed_pieces"]
              maps each processed piece label to its pre-processed text
            - single mode: input_data["input_text"] holds the pre-processed text
        BUG FIX: the dict is now always returned; the original fell off the end
        and returned None when chaining was disabled, crashing the callers that
        pass the result straight into JSON generation.
    """
    print("Starting preprocessing")
    creation_mode = input_data["stepwise_json_creation"][0]
    if creation_mode == "stepwisejsoncreation":
        print("Stepwise Creation")
        input_data["input_text_pieces"]["pre_processed_pieces"] = {}
        if parameters["chaining"]:
            print("Chaining")
            for text_label, text_body in input_data["input_text_pieces"].items():
                if 'data_input' not in text_label:
                    continue  # skip helper keys such as "pre_processed_pieces"
                for parameter_name, parameter_value in parameters.items():
                    # Only the string-valued *_pre_prompt parameters apply; the
                    # bool "pre_prompt" flag and empty prompts are skipped.
                    if 'pre_prompt' in parameter_name and parameter_value and not isinstance(parameter_value, bool) and text_body:
                        print("Text Label")
                        print(text_label)
                        print("Prompt followed by data entered")
                        print(parameter_value)
                        print(text_body)
                        response = client.chat.completions.create(
                            model=parameters["model_version"],
                            messages=[
                                {"role": "system", "content": parameter_value},
                                {"role": "user", "content": text_body}
                            ]
                        )
                        response_text = response.choices[0].message.content
                        print("Response text")
                        print(response_text)
                        # BUG FIX: actually chain -- feed this response into the
                        # next pre-prompt (the original re-used the raw text and
                        # only the last pre-prompt's output survived), matching
                        # the single-text branch below.
                        text_body = response_text
                        input_data["input_text_pieces"]["pre_processed_pieces"][text_label] = response_text
        return input_data
    if creation_mode == "singlejsoncreation":
        if parameters["chaining"]:
            input_text = input_data["input_text"]
            pre_processing_list = [
                parameters["context_pre_prompt"],
                parameters["summary_pre_prompt"],
                parameters["conversation_pre_prompt"],
                parameters["example_pre_prompt"],
            ]
            print("PreProcessingList")
            print(pre_processing_list)
            for pre_prompt in pre_processing_list:
                try:
                    print("Pre-Processing: ")
                    if pre_prompt:
                        print("Prompt: ")
                        print(pre_prompt)
                        print("Input Text: ")
                        print(input_text)
                        print("Model: ")
                        print(parameters["model_version"])
                        response = client.chat.completions.create(
                            model=parameters["model_version"],
                            messages=[
                                {"role": "system", "content": pre_prompt},
                                {"role": "user", "content": input_text}
                            ]
                        )
                        input_text = response.choices[0].message.content
                        print("Response Text: ")
                        print(input_text)
                except Exception as e:
                    print(f"Failed to parse response as JSON. Error was: {e}")
            input_data["input_text"] = input_text
        return input_data
    # Unknown creation mode: return the input unchanged rather than None.
    return input_data
def process_specifications(input_data, parameters):
    """
    Once the parameters and data are parsed, run optional pre-processing and
    then generate the JSON outputs in the requested mode.

    Args:
        input_data: (dict) The input data from the user.
            input_data["stepwise_json_creation"] = (list) first element selects
                "stepwisejsoncreation" or "singlejsoncreation"
            input_data["input_text"] = (str) the input text
            input_data["input_text_pieces"] = (dict) individual text pieces
        parameters: (dict) All of the individual parameters and "flippers".
            parameters["pre_prompt"] = (bool) whether to run pre_processing()
            parameters["model_version"] = (str) what model should be used
            parameters["chaining"] = (bool) whether pre-prompts are chained
    Returns:
        3 processed data-filled JSON objects:
        farm_pretty_json, interactions_pretty_json, trial_pretty_json
        (None when the creation mode is unrecognized).
    """
    print("Processing specifications")
    print("Here is also the input data")
    print(input_data)
    print("Here is also the parameters")
    print(parameters)
    creation_mode = input_data["stepwise_json_creation"][0]
    if creation_mode == "stepwisejsoncreation":
        print("You are continuing with stepwise json creation")
        if parameters["pre_prompt"]:
            print("You are continuing with pre_prompt processing")
            processed_input = pre_processing(input_data, parameters)
        else:
            print("You have elsed into no pre-processing")
            processed_input = input_data
        return generate_json_pieces(processed_input, parameters)
    if creation_mode == "singlejsoncreation":
        print("You are elifing into single json creation")
        if parameters["pre_prompt"]:
            print("You are preprocessing now")
            processed_input = pre_processing(input_data, parameters)
        else:
            print("You do not have any preprocessing now")
            processed_input = input_data
        return generate_json(processed_input, parameters)
def parse_survey_stack_parameters(data):
    """
    Parse the incoming parameters from the parameter survey.

    Args:
        data: (json) JSON retrieved from surveystack API after retrieving survey info/details
    Returns:
        processed_data (dict)
            processed_data["pre_prompt"] = (bool) whether there is a pre-prompt to process through pre_processing()
            processed_data["model_version"] = (str) what model should be used
            processed_data["chaining"] = (bool) whether the preprocessed input context should be chained (given to multiple models)
            processed_data["context_pre_prompt"], processed_data["summary_pre_prompt"],
            processed_data["conversation_pre_prompt"], processed_data["example_pre_prompt"]
                = (str or None) all of the pre-prompts, separated
            processed_data["combined_pre_prompt"] = (str or None) concatenated individual pre-prompts
    """
    # BUG FIX: start from safe defaults so downstream code always finds every
    # key -- the original left keys missing when extraction raised partway
    # through, causing KeyErrors later in process_specifications().
    processed_data = {
        "pre_prompt": False,
        "context_pre_prompt": None,
        "summary_pre_prompt": None,
        "conversation_pre_prompt": None,
        "example_pre_prompt": None,
        "chaining": False,
        "combined_pre_prompt": None,
    }
    processed_data["model_version"] = data[0]['data']['modelversion']['value'][0]
    print("DATA: ")
    print(data)
    try:
        print("Extracting parameters")
        pre_prompt_parameters = data[0]['data']['group_2']
        if pre_prompt_parameters['preprompt']['value'][0] == 'continue_preprompts':
            processed_data["pre_prompt"] = True
            # Accessing context and other prompts, with defaults in case they are None
            processed_data["context_pre_prompt"] = pre_prompt_parameters.get('contextpreprompt', {}).get('value', None)
            processed_data["summary_pre_prompt"] = pre_prompt_parameters.get('summarypreprompt', {}).get('value', None)
            processed_data["conversation_pre_prompt"] = pre_prompt_parameters.get('conversationpreprompt', {}).get('value', None)
            processed_data["example_pre_prompt"] = pre_prompt_parameters.get('examplepreprompt', {}).get('value', None)
            # Check if chaining is set to "yes" or "no"
            chaining_value = pre_prompt_parameters.get('prepromptchaining', {}).get('value', [None])[0]
            if chaining_value == "no":
                # Combine the present prompts into one when chaining is "no"
                processed_data["chaining"] = False
                processed_data["combined_pre_prompt"] = " ".join(
                    filter(None, [
                        processed_data["context_pre_prompt"],
                        processed_data["summary_pre_prompt"],
                        processed_data["conversation_pre_prompt"],
                        processed_data["example_pre_prompt"]
                    ])
                )
            else:
                # Chaining enabled: prompts stay separate, no combined prompt
                processed_data["chaining"] = True
                processed_data["combined_pre_prompt"] = None
        # else: the defaults above already describe "no pre-prompt"
    except Exception as e:
        print(f"An error occurred: {e}")
    print("Done Extracting parameters:")
    print(str(processed_data))
    return processed_data
def parse_survey_stack_data(data):
    """
    Parse the incoming data from the survey stack survey.

    Args:
        data: (json) JSON retrieved from surveystack API after retrieving survey info/details
    Returns:
        processed_data (dict)
            processed_data["stepwise_json_creation"] = (list) selected creation mode
            processed_data["input_text"] = (str) the raw input text ("EMPTY" in stepwise mode)
            processed_data["input_text_pieces"] = (dict) the nine individual text
                pieces ("EMPTY" placeholders in single-text mode)
    """
    print("PROCESSING SURVEY STACK DATA")
    processed_data = {}
    print("JUST PRINTING OUT THE DATA FOR YA")
    print(data)
    processed_data["stepwise_json_creation"] = data[0]['data']['stepwisejsoncreation']['value']
    print("STEPWISE?: " + str(processed_data["stepwise_json_creation"]))
    farm_keys = ("field_data_input", "planting_data_input", "log_data_input", "soil_data_input", "yield_data_input")
    interaction_keys = ("interaction_data_input", "person_data_input")
    trial_keys = ("trial_data_input", "treatment_data_input")
    mode = processed_data["stepwise_json_creation"][0]
    if mode == "stepwisejsoncreation":
        print("IN THE STEP")
        farm_management_inputs = data[0]['data']['group_4']
        print("FARM MANAGEMENT INPUTS" + str(farm_management_inputs))
        pieces = {}
        for key in farm_keys:
            pieces[key] = farm_management_inputs.get(key, {}).get('value', None)
        processed_data["input_text_pieces"] = pieces
        processed_data["input_text"] = "EMPTY"
        print("NEXT SCHEMA INPUTS")
        interactions_inputs = data[0]['data']['group_5']
        print("INTERACTIONS INPUTS" + str(interactions_inputs))
        for key in interaction_keys:
            pieces[key] = interactions_inputs.get(key, {}).get('value', None)
        print("NEXT SCHEMA INPUTS 2")
        trials_inputs = data[0]['data']['group_6']
        print("TRIALS INPUTS" + str(trials_inputs))
        for key in trial_keys:
            pieces[key] = trials_inputs.get(key, {}).get('value', None)
    elif mode == "singlejsoncreation":
        print("IN THE SINGLE")
        processed_data["input_text"] = data[0]['data']['onelonginputtext']['value']
        print(processed_data["input_text"])
        # All piece slots are placeholders in single-text mode.
        processed_data["input_text_pieces"] = {
            key: "EMPTY" for key in farm_keys + interaction_keys + trial_keys
        }
    print("RETURNING DATA")
    print(processed_data)
    return processed_data