import json
import os
from enum import Enum
from typing import Any, Dict, List, Literal, Optional, Union

import gradio as gr
from gradio_toggle import Toggle
from openai import OpenAI
from pydantic import BaseModel, Field, ValidationError, validator

from schema_classes import (
    FarmActivities,
    FarmActivitiesLite,
    Interactions,
    InteractionsLite,
    Log,
    Person,
    PlantingLite,
    Soil,
    Treatment,
    Trial,
    TrialLite,
    Yield,
)
|
|
|
|
| |
| |
| |
# Fail fast with a clear message when the API key is missing. The original
# `os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY")` was a no-op
# when the variable was set and raised a confusing TypeError (None is not a
# str) when it was not.
if not os.getenv("OPENAI_API_KEY"):
    raise RuntimeError("OPENAI_API_KEY environment variable is not set.")
client = OpenAI()
|
|
| |
|
|
| |
|
|
| |
| |
| |
| |
|
|
| |
|
|
| |
| |
|
|
| |
| |
|
|
| |
| |
|
|
| |
|
|
| |
| |
| |
| |
|
|
| |
|
|
| |
| |
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
|
|
def generate_json(input_data, parameters):
    """
    Prompt the OpenAI API to generate structured JSON output for all three
    schemas (farm activities, interactions, trial) from one input text.

    Args:
        input_data: (dict) The input data, preprocessed, from the user. Aka what will fill in the JSON
            input_data["input_text"] = the preprocessed input text
            input_data["input_context"] = depending on levers, empty or what is put in front of the prompt
        parameters: (dict) All of the individual parameters and "flippers"
            parameters["model_version"] = (str) what model should be used
            parameters["chaining"] = (bool) whether or not the preprocessed input context should be chained (given to multiple models)
            parameters["context_pre_prompt"], parameters["summary_pre_prompt"], parameters["conversation_pre_prompt"], parameters["example_pre_prompt"] = (str) all of the pre-prompts, separated
            parameters["combined_pre_prompt"] = (str) concatenated individual pre-prompts

    Returns:
        3 processed data-filled JSON strings: farm_pretty_json,
        interactions_pretty_json, trial_pretty_json.
        On failure, a 3-tuple of identical error JSON strings so callers that
        unpack three outputs still work (the original returned a single dict
        on the error path, which broke the 3-value unpacking).
    """
    print("Generating JSON Whole!")
    input_text = input_data["input_text"]
    model_version = parameters["model_version"]

    # One (label, system prompt, response schema) triple per output JSON.
    prefix = parameters["combined_pre_prompt"] or ""
    sections = [
        ("FARM", prefix + "Extract the farm information.", FarmActivities),
        ("INTERACTIONS", prefix + "Extract the interactions information.", Interactions),
        ("TRIALS", prefix + "Extract the trial information.", Trial),
    ]

    try:
        results = []
        for label, prompt, schema in sections:
            response = client.beta.chat.completions.parse(
                model=model_version,
                messages=[
                    {"role": "system", "content": prompt},
                    {"role": "user", "content": input_text},
                ],
                response_format=schema,
            )
            parsed = response.choices[0].message.parsed
            print(f"{label} JSON: ")
            print(parsed)
            results.append(parsed.json())

        farm_pretty_json, interactions_pretty_json, trial_pretty_json = results
        return farm_pretty_json, interactions_pretty_json, trial_pretty_json

    except ValidationError as e:
        error = json.dumps({"error": str(e)})
        return error, error, error
    except Exception as e:
        error = json.dumps({"error": "Failed to generate valid JSON. " + str(e)})
        return error, error, error
|
|
| |
def _generate_piece(prompt, data_input, schema, model_version, label):
    """Run one structured-output call for a single schema piece.

    Args:
        prompt: (str) the system prompt for this piece.
        data_input: (str) the user text for this piece.
        schema: the pydantic model class passed as response_format.
        model_version: (str) the model to use.
        label: (str) name used only in the debug prints.

    Returns:
        (dict) the parsed response converted to a plain dict (safe to embed
        in a structure passed to json.dumps).
    """
    print(f"{label} prompt")
    print(prompt)
    print(f"{label} data input")
    print(data_input)
    response = client.beta.chat.completions.parse(
        model=model_version,
        messages=[
            {"role": "system", "content": prompt},
            {"role": "user", "content": data_input},
        ],
        response_format=schema,
    )
    parsed = response.choices[0].message.parsed
    pretty = parsed.dict()
    print(f"{label.upper()} JSON: ")
    print(pretty)
    return pretty


def generate_json_pieces(input_data, parameters):
    """
    Build each individual JSON section with its own model call, then
    concatenate the pieces. The hypothesis is that individual calls may be
    more robust than giving the model all the data at once.

    Args:
        input_data: (dict) The input data, preprocessed, from the user. Aka what will fill in the JSON
            input_data["input_text"] = (str) the preprocessed input text
            input_data["input_context"] = (str) depending on levers, empty or what is put in front of the prompt
            input_data["input_text_pieces"] = (dict) containing the individual split up prompt pieces: field_data_input, planting_data_input, log_data_input, soil_data_input, yield_data_input
        parameters: (dict) All of the individual parameters and "flippers"
            parameters["model_version"] = (str) what model should be used
            parameters["chaining"] = (bool) whether or not the preprocessed input context should be chained (given to multiple models)
            parameters["context_pre_prompt"], parameters["summary_pre_prompt"], parameters["conversation_pre_prompt"], parameters["example_pre_prompt"] = (str) all of the pre-prompts, separated
            parameters["combined_pre_prompt"] = (str) concatenated individual pre-prompts
            parameters["additional_json_pieces_options"] = (str) "Explicit specific pieces" or "Parse from one big input text"

    Returns:
        3 JSON strings: farm activities, interactions, trials. On failure,
        a 3-tuple of identical error JSON strings (the original returned a
        single dict, breaking the 3-value unpacking done by callers).
    """
    print("Generating JSON Pieces!")
    print("INPUT DATA")
    print(input_data)
    print("PARAMS")
    print(parameters)

    specification = input_data["input_text"]
    model_version = parameters["model_version"]
    print("Specification and Model Version")
    print(specification)
    print(model_version)

    # Use the pre-processed pieces when a chained pre-prompt produced them,
    # otherwise fall back to the raw pieces.
    if parameters["pre_prompt"] and parameters["chaining"]:
        print("Pre prompt is true")
        pieces = input_data["input_text_pieces"]["pre_processed_pieces"]
    else:
        print("Pre prompt is false")
        pieces = input_data["input_text_pieces"]

    field_data_input = pieces["field_data_input"]
    planting_data_input = pieces["planting_data_input"]
    log_data_input = pieces["log_data_input"]
    soil_data_input = pieces["soil_data_input"]
    yield_data_input = pieces["yield_data_input"]
    interaction_data_input = pieces["interaction_data_input"]
    person_data_input = pieces["person_data_input"]
    trial_data_input = pieces["trial_data_input"]
    treatment_data_input = pieces["treatment_data_input"]

    print("Setting prompts")
    prefix = parameters["combined_pre_prompt"] or ""
    field_prompt = prefix + "Extract the field information."
    plant_prompt = prefix + "Extract the planting information."
    log_prompt = prefix + "Extract the log information."
    soil_prompt = prefix + "Extract the soil information."
    yield_prompt = prefix + "Extract the yield information."
    interaction_prompt = prefix + "Extract the interaction information"
    person_prompt = prefix + "Please provide a list of people involved in this interaction, with each person's name, role, and any other relevant details."
    trial_prompt = prefix + "Extract the trial information"
    treatment_prompt = prefix + "Please provide a list of different treatments (strips or blocks with the same conditions applied) performed by the partner."

    try:
        print("Getting all responses in pieces, starting with field response")

        # --- Farm activities ---
        field_pretty_json = _generate_piece(field_prompt, field_data_input, FarmActivitiesLite, model_version, "Field")
        plant_pretty_json = _generate_piece(plant_prompt, planting_data_input, PlantingLite, model_version, "Plant")
        log_pretty_json = _generate_piece(log_prompt, log_data_input, Log, model_version, "Log")
        soil_pretty_json = _generate_piece(soil_prompt, soil_data_input, Soil, model_version, "Soil")
        yield_pretty_json = _generate_piece(yield_prompt, yield_data_input, Yield, model_version, "Yield")

        plantings = {
            **plant_pretty_json,
            "logs": log_pretty_json,
            "soil": soil_pretty_json,
            "yield_": yield_pretty_json,
        }
        farm_activities = {**field_pretty_json, "plantings": plantings}
        print("ADDED DICTS")
        print(farm_activities)
        print("FINAL JSON: ")
        final_pretty_farm_activity_json = json.dumps(farm_activities, indent=4)
        print(final_pretty_farm_activity_json)

        # --- Interactions ---
        interaction_pretty_json = _generate_piece(interaction_prompt, interaction_data_input, InteractionsLite, model_version, "Interaction")
        # BUG FIX: the original stored the person call in `interaction_response`
        # but then read the never-assigned `person_response` (NameError).
        person_pretty_json = _generate_piece(person_prompt, person_data_input, Person, model_version, "Person")
        # BUG FIX: embed the dict (not the pydantic model) so the json.dumps
        # below does not raise TypeError.
        interactions = {**interaction_pretty_json, "people": person_pretty_json}
        print("ADDED DICTS 2")
        print(interactions)
        print("FINAL JSON: ")
        final_pretty_interactions_json = json.dumps(interactions, indent=4)
        print(final_pretty_interactions_json)

        # --- Trials ---
        trial_pretty_json = _generate_piece(trial_prompt, trial_data_input, TrialLite, model_version, "Trial")
        treatment_pretty_json = _generate_piece(treatment_prompt, treatment_data_input, Treatment, model_version, "Treatment")
        # BUG FIX: same dict-vs-model issue as "people" above.
        trials = {**trial_pretty_json, "treatments": treatment_pretty_json}
        print("ADDED DICTS 3")
        print(trials)
        final_pretty_trials_json = json.dumps(trials, indent=4)
        print(final_pretty_trials_json)

        return final_pretty_farm_activity_json, final_pretty_interactions_json, final_pretty_trials_json
    except Exception as e:
        # Return the error three times so callers unpacking three outputs
        # still work on the failure path.
        error = json.dumps({"error": "Failed to generate valid JSON. " + str(e)})
        return error, error, error
| |
|
|
def pre_processing(input_data, parameters):
    """
    When a pre-prompt is configured, run the pre-prompts over the input text
    (single mode) or over the individual text pieces (stepwise mode) before
    JSON generation.

    Args:
        input_data: (dict) The input data from the user.
            input_data["input_text"] = (str) the raw input text
            input_data["input_context"] = (str) depending on levers, empty or what is put in front of the prompt
            input_data["input_text_pieces"] = (dict) the individual split up prompt pieces
            input_data["stepwise_json_creation"] = (list) mode selector
        parameters: (dict) All of the individual parameters and "flippers"
            parameters["model_version"], parameters["chaining"],
            parameters["context_pre_prompt"], parameters["summary_pre_prompt"],
            parameters["conversation_pre_prompt"], parameters["example_pre_prompt"],
            parameters["combined_pre_prompt"], etc.

    Returns:
        (dict) input_data, with either input_data["input_text"] rewritten by
        the chained pre-prompts (single mode) or
        input_data["input_text_pieces"]["pre_processed_pieces"] filled in
        (stepwise mode). Always returns input_data (the original could fall
        through and return None on some paths).
    """
    print("Starting preprocessing")
    mode = input_data["stepwise_json_creation"][0]

    if mode == "stepwisejsoncreation":
        print("Stepwise Creation")
        input_data["input_text_pieces"]["pre_processed_pieces"] = {}

        if parameters["chaining"]:
            print("Chaining")
            for text_label, text_body in input_data["input_text_pieces"].items():
                if 'data_input' not in text_label:
                    continue
                # BUG FIX: chain properly — each applicable pre-prompt now
                # runs on the output of the previous one. The original fed
                # the raw text to every prompt, so only the last response
                # survived (inconsistent with the single-mode branch below).
                processed_text = text_body
                for parameter_name, parameter_value in parameters.items():
                    # Only string-valued *_pre_prompt parameters apply; the
                    # isinstance guard skips the boolean "pre_prompt" flag.
                    if ('pre_prompt' in parameter_name and parameter_value
                            and not isinstance(parameter_value, bool) and processed_text):
                        print("Text Label")
                        print(text_label)
                        print("Prompt followed by data entered")
                        print(parameter_value)
                        print(processed_text)
                        response = client.chat.completions.create(
                            model=parameters["model_version"],
                            messages=[
                                {"role": "system", "content": parameter_value},
                                {"role": "user", "content": processed_text},
                            ]
                        )
                        processed_text = response.choices[0].message.content
                        print("Response text")
                        print(processed_text)
                        input_data["input_text_pieces"]["pre_processed_pieces"][text_label] = processed_text

    elif mode == "singlejsoncreation":
        if parameters["chaining"]:
            input_text = input_data["input_text"]
            pre_processing_list = [
                parameters["context_pre_prompt"],
                parameters["summary_pre_prompt"],
                parameters["conversation_pre_prompt"],
                parameters["example_pre_prompt"],
            ]
            print("PreProcessingList")
            print(pre_processing_list)
            for pre_prompt in pre_processing_list:
                try:
                    print("Pre-Processing: ")
                    if pre_prompt:
                        print("Prompt: ")
                        print(pre_prompt)
                        print("Input Text: ")
                        print(input_text)
                        print("Model: ")
                        print(parameters["model_version"])
                        response = client.chat.completions.create(
                            model=parameters["model_version"],
                            messages=[
                                {"role": "system", "content": pre_prompt},
                                {"role": "user", "content": input_text},
                            ]
                        )
                        response_text = response.choices[0].message.content
                        print("Response Text: ")
                        print(response_text)
                        # Chain: the next pre-prompt sees this response.
                        input_text = response_text
                except Exception as e:
                    # Best-effort: a failed step keeps the previous text.
                    print(f"Failed to parse response as JSON. Error was: {e}")
            input_data["input_text"] = input_text

    return input_data
|
|
| |
def process_specifications(input_data, parameters):
    """
    Once the parameters and data are processed, run the optional
    pre-processing and then generate the JSONs in the selected mode.

    Args:
        input_data: (dict) The input data from the user.
            input_data["input_text"] = (str) the preprocessed input text
            input_data["input_context"] = (str) depending on levers, empty or what is put in front of the prompt
            input_data["input_text_pieces"] = (dict) the individual split up prompt pieces
            input_data["stepwise_json_creation"] = (list) mode selector
        parameters: (dict) All of the individual parameters and "flippers"
            parameters["pre_prompt"] = (bool) whether there is a pre-prompt to process through pre_processing()
            parameters["model_version"], parameters["chaining"], the four
            individual pre-prompts, parameters["combined_pre_prompt"], etc.

    Returns:
        3 processed data-filled JSON objects: farm_pretty_json,
        interactions_pretty_json, trial_pretty_json.

    Raises:
        ValueError: if the mode selector is neither "stepwisejsoncreation"
            nor "singlejsoncreation" (the original silently returned None).
    """
    print("Processing specifications")
    print("Here is also the input data")
    print(input_data)
    print("Here is also the parameters")
    print(parameters)

    mode = input_data["stepwise_json_creation"][0]
    if mode not in ("stepwisejsoncreation", "singlejsoncreation"):
        raise ValueError(f"Unknown stepwise_json_creation mode: {mode!r}")

    # The pre-processing decision was duplicated verbatim in both branches of
    # the original; it only depends on the pre_prompt flag, so do it once.
    if parameters["pre_prompt"]:
        print("You are continuing with pre_prompt processing")
        processed_input = pre_processing(input_data, parameters)
    else:
        print("You have elsed into no pre-processing")
        processed_input = input_data

    if mode == "stepwisejsoncreation":
        print("You are continuing with stepwise json creation")
        return generate_json_pieces(processed_input, parameters)

    print("You are elifing into single json creation")
    return generate_json(processed_input, parameters)
| |
| |
def parse_survey_stack_parameters(data):
    """
    Parse the incoming parameters from the parameter survey.

    Args:
        data: (list) JSON retrieved from surveystack API after retrieving survey info/details

    Returns:
        processed_data (dict)
            processed_data["pre_prompt"] = (bool) whether or not there is a pre-prompt to process through pre_processing()
            processed_data["model_version"] = (str) what model should be used
            processed_data["chaining"] = (bool) whether or not the preprocessed input context should be chained (given to multiple models)
            processed_data["context_pre_prompt"], ["summary_pre_prompt"], ["conversation_pre_prompt"], ["example_pre_prompt"] = (str or None) all of the pre-prompts, separated
            processed_data["combined_pre_prompt"] = (str or None) concatenated individual pre-prompts
    """
    # Start from safe defaults so a failure while extracting the optional
    # pre-prompt parameters still yields a complete dict. The original's
    # broad `except` could leave keys like "pre_prompt" and "chaining"
    # missing entirely, causing KeyErrors downstream.
    processed_data = {
        "pre_prompt": False,
        "context_pre_prompt": None,
        "summary_pre_prompt": None,
        "conversation_pre_prompt": None,
        "example_pre_prompt": None,
        "chaining": False,
        "combined_pre_prompt": None,
    }

    processed_data["model_version"] = data[0]['data']['modelversion']['value'][0]

    print("DATA: ")
    print(data)

    try:
        print("Extracting parameters")
        pre_prompt_parameters = data[0]['data']['group_2']

        if pre_prompt_parameters['preprompt']['value'][0] == 'continue_preprompts':
            processed_data["pre_prompt"] = True

            processed_data["context_pre_prompt"] = pre_prompt_parameters.get('contextpreprompt', {}).get('value', None)
            processed_data["summary_pre_prompt"] = pre_prompt_parameters.get('summarypreprompt', {}).get('value', None)
            processed_data["conversation_pre_prompt"] = pre_prompt_parameters.get('conversationpreprompt', {}).get('value', None)
            processed_data["example_pre_prompt"] = pre_prompt_parameters.get('examplepreprompt', {}).get('value', None)

            chaining_value = pre_prompt_parameters.get('prepromptchaining', {}).get('value', [None])[0]

            if chaining_value == "no":
                # No chaining: squash the individual pre-prompts into one
                # combined prompt, dropping any that are unset.
                processed_data["combined_pre_prompt"] = " ".join(
                    filter(None, [
                        processed_data["context_pre_prompt"],
                        processed_data["summary_pre_prompt"],
                        processed_data["conversation_pre_prompt"],
                        processed_data["example_pre_prompt"],
                    ])
                )
                processed_data["chaining"] = False
            else:
                processed_data["chaining"] = True
                processed_data["combined_pre_prompt"] = None

    except Exception as e:
        # Best-effort: log and fall back to the defaults set above.
        print(f"An error occurred: {e}")

    print("Done Extracting parameters:")
    print(str(processed_data))
    return processed_data
|
|
def parse_survey_stack_data(data):
    """
    Parse the incoming data from the survey stack survey.

    Args:
        data: (json) JSON retrieved from surveystack API after retrieving survey info/details

    Returns:
        processed_data (dict)
            processed_data["stepwise_json_creation"] = the raw mode selection list
            processed_data["input_text"] = (str) the raw input text, or "EMPTY" in stepwise mode
            processed_data["input_text_pieces"] = (dict) per-schema text pieces
                ("EMPTY" placeholders in single mode)
    """
    print("PROCESSING SURVEY STACK DATA")
    processed_data = {}

    print("JUST PRINTING OUT THE DATA FOR YA")
    print(data)

    mode = data[0]['data']['stepwisejsoncreation']['value']
    processed_data["stepwise_json_creation"] = mode
    print("STEPWISE?: " + str(mode))

    # Key groups per survey section.
    farm_keys = ("field_data_input", "planting_data_input", "log_data_input",
                 "soil_data_input", "yield_data_input")
    interaction_keys = ("interaction_data_input", "person_data_input")
    trial_keys = ("trial_data_input", "treatment_data_input")

    if mode[0] == "stepwisejsoncreation":
        print("IN THE STEP")
        farm_management_inputs = data[0]['data']['group_4']
        print("FARM MANAGEMENT INPUTS" + str(farm_management_inputs))

        pieces = processed_data["input_text_pieces"] = {}
        for key in farm_keys:
            pieces[key] = farm_management_inputs.get(key, {}).get('value', None)
        processed_data["input_text"] = "EMPTY"

        print("NEXT SCHEMA INPUTS")
        interactions_inputs = data[0]['data']['group_5']
        print("INTERACTIONS INPUTS" + str(interactions_inputs))
        for key in interaction_keys:
            pieces[key] = interactions_inputs.get(key, {}).get('value', None)

        print("NEXT SCHEMA INPUTS 2")
        trials_inputs = data[0]['data']['group_6']
        print("TRIALS INPUTS" + str(trials_inputs))
        for key in trial_keys:
            pieces[key] = trials_inputs.get(key, {}).get('value', None)

    elif mode[0] == "singlejsoncreation":
        print("IN THE SINGLE")
        processed_data["input_text"] = data[0]['data']['onelonginputtext']['value']
        print(processed_data["input_text"])

        # Single mode keeps placeholder pieces so downstream lookups succeed.
        processed_data["input_text_pieces"] = {
            key: "EMPTY" for key in farm_keys + interaction_keys + trial_keys
        }

    print("RETURNING DATA")
    print(processed_data)
    return processed_data
|
|