import os
from pydantic import BaseModel, Field, validator, ValidationError
import gradio as gr
from openai import OpenAI
from typing import List, Dict, Any, Optional, Literal, Union
from enum import Enum
from gradio_toggle import Toggle
import json

# Person and Treatment are used as response formats below, so they must be importable here
from schema_classes import FarmActivities, Interactions, Trial, FarmActivitiesLite, PlantingLite, Log, Soil, Yield, InteractionsLite, TrialLite, Person, Treatment


# The API key must be stored as a "secret" in your environment. Generate it from OpenAI, or from the website of whichever company provides the model you wish to use.
# To use other providers' models, some of the endpoint calls below would need to change slightly.
# As is, the endpoint used requires a model that supports OpenAI's structured outputs.
os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY")
client = OpenAI()

# What the survey should look like: 

# Do you want to enter your text in one big block (free-form style) or in individual pieces (form-based style)? ###

# Free-form style means that a single JSON will be created from the block of text. This JSON will have a single level of nesting, created by the model. 
# Form-based style means that individual JSON pieces will be created from different pieces of text. You will have a few more prompts to enter in this case. These JSON pieces will be manually combined with code. This JSON will also have a single level of nesting, created manually.
# Parse either one big block of text conditionally or individual pieces conditionally 
# values = big-block-input-text, individual-pieces-input-text

# actual long text value = onelonginputtext

# What model version do you want to process your input data?  ###
# Parse ['gpt-4o-mini-2024-07-18', 'gpt-4o-2024-08-06']

# Do you want to pre-process your data? We will feed your data to a conversational model prior to creating the schema, with a prompt of your choosing.
# Parse ['yes', 'no']

# Do you want to enter multiple pre-prompts? Or will you only be entering one? You can enter up to three separate pre-prompts at this time.
# Parse ['yes', 'no']

# Do you have a specific pre-processing style in mind? This is just for data collection purposes. (Allow to pick multiple)

# Not specified means you just want to put text in and it doesn't fit the other categories
# Summarization means you're going to ask the model to produce some sort of summary as your pre-processing step.
# Specific field extraction means you're going to ask the models to extract some details as your pre-processing step.
# Parse possibly multiple values ['not_specified', 'summarization', 'specific-field-extraction']

# Parse [1-3 pre_processing_prompts text boxes]

# Now it is time to enter your prompts. The number of prompts will be directly related to which input data form you've chosen. 
# This is the input data that will be parsed with this prompt: 

# Input data here ###
# These are sample prompts; choose one or create your own: 
    # These options if free-form with same text for each
    # Schema 1 prompts:
    #farm_prompt = "Extract the farm information."
    # Schema 2 prompts:
    #interactions_prompt = "Extract the interactions information."
    # Schema 3 prompts:
    #trial_prompt = "Extract the trial information."

    # Schema 1 prompts:
    #field_prompt = "Extract the field information."
    #plant_prompt = "Extract the planting information."
    #log_prompt = "Extract the log information."
    #soil_prompt = "Extract the soil information."
    #yield_prompt = "Extract the yield information."
    # Schema 2 prompts:
    #interaction_prompt = "Extract the interaction information"
    #person_prompt = "Please provide a list of people involved in this interaction, with each person's name, role, and any other relevant details."
    # Schema 3 prompts:
    #trial_prompt = "Extract the trial information"
    #treatment_prompt = "Please provide a list of different treatments (strips or blocks with the same conditions applied) performed by the partner."


# Parameters - The Levers and Flippers to be chosen 
# Use this for reference for now then work backwards
# Otter.ai-summary means that you've already pre-processed your input data using otter.ai and you don't need further pre-processing ##### this option is just confusing, so don't include it
#all_options = {
#    'model_version': ['gpt-4o-mini-2024-07-18 (Smaller version for faster responses)', 'gpt-4o-2024-08-06 (Latest GPT model with structured outputs)'],
#    'input_modality': ['free-text-input / single-JSON-creation (model creates entire JSON) / single-level-nested', 'form-text-input / stepwise-JSON-creation (individual pieces created then manually combined) / no-nesting (flat schema)'],
#    'pre_processing': ['yes', 'no'],
#    'pre_processing_multiple': ['yes', 'no'],
#    'pre_processing_specification': ['not_specified', 'summarization', 'specific-field-extraction'],
#    'prompting_style': ['no_specific_style', 'example_driven', 'role_specific', 'step_by_step', 'error_detection'],
#    'input_text': {'whole_input_text': "value", 'input_text_pieces': {"piece_1": "value", "piece_2": "value"}},
#    'pre_processing_prompt': {'pre_processing_prompts': {"prompt_1": "value", "prompt_2": "value"}},
#    'prompt': {'prompts': {"prompt1": "value", "prompt2": "value"}}
#}
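
# For concreteness, a hedged sketch (unused by the app itself) of the runtime
# `parameters` dict that parse_survey_stack_parameters() below assembles from
# these survey answers. The prompt strings are hypothetical placeholders.
_example_parameters = {
    "model_version": "gpt-4o-mini-2024-07-18",
    "pre_prompt": True,
    "chaining": False,
    "context_pre_prompt": "You are reading a transcript of a farm visit.",  # hypothetical
    "summary_pre_prompt": "Summarize the management practices mentioned.",  # hypothetical
    "conversation_pre_prompt": None,
    "example_pre_prompt": None,
    # With chaining off, the non-empty pre-prompts are concatenated:
    "combined_pre_prompt": "You are reading a transcript of a farm visit. Summarize the management practices mentioned.",
}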


def generate_json(input_data, parameters): 
    """
    Function to prompt OpenAI API to generate structured JSON output.

    Args: 
        input_data: (dict) The input data, preprocessed, from the user. Aka what will fill in the JSON
            input_data["input_text"] = the preprocessed input text
            input_data["input_context"] = depending on levers, empty or what is put in front of the prompt
        parameters: (dict) All of the individual parameters and "flippers" 
            parameters["model_version"] = (str) what model should be used 
            parameters["chaining"] = (bool) whether or not the preprocessed input context should be chained (given to multiple models)
            parameters["context_pre_prompt"], parameters["summary_pre_prompt"], parameters["conversation_pre_prompt"], parameters["example_pre_prompt"] = (str) all of the pre-prompts, separated
            parameters["combined_pre_prompt"] = (str) concatenated individual pre-prompts

    Returns: 
        3 processed data-filled JSON objects: farm_pretty_json, interactions_pretty_json, trial_pretty_json
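
    Example (hypothetical values for illustration; not real survey data):
        input_data = {"input_text": "We planted corn on the north field on May 3rd...", "input_context": "EMPTY"}
        parameters = {"model_version": "gpt-4o-2024-08-06", "chaining": False, "combined_pre_prompt": None}
        farm_json, interactions_json, trial_json = generate_json(input_data, parameters)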
    """
    print("Generating JSON Whole!")
    input_text = input_data["input_text"]
    model_version = parameters["model_version"]

    farm_prompt = "Extract the farm information."
    interactions_prompt = "Extract the interactions information."
    trial_prompt = "Extract the trial information."
    
    if parameters["combined_pre_prompt"]:
        farm_prompt = parameters["combined_pre_prompt"] + farm_prompt
        interactions_prompt = parameters["combined_pre_prompt"] + interactions_prompt
        trial_prompt = parameters["combined_pre_prompt"] + trial_prompt

        
    try:
        #Call OpenAI API to generate structured output based on prompt
        
        farm_info_response = client.beta.chat.completions.parse(
            model=model_version,  # Use GPT model that supports structured output
            messages=[
                {"role": "system", "content": farm_prompt},
                {"role": "user", "content": input_text}
            ],
            response_format=FarmActivities,
        )

        farm_generated_json = farm_info_response.choices[0].message.parsed

        
        print("FARM JSON: ")
        print(farm_generated_json) # debugging
        farm_pretty_json = farm_generated_json.json()

        
        interactions_response = client.beta.chat.completions.parse(
            model=model_version,  # Use GPT model that supports structured output
            messages=[
                {"role": "system", "content": interactions_prompt},
                {"role": "user", "content": input_text}
            ],
            response_format=Interactions,
        )
            
        interactions_generated_json = interactions_response.choices[0].message.parsed

        print("INTERACTIONS JSON: ")
        print(interactions_generated_json) # debugging 2
        interactions_pretty_json = interactions_generated_json.json()


        trial_response = client.beta.chat.completions.parse(
            model=model_version,  # Use GPT model that supports structured output
            messages=[
                {"role": "system", "content": trial_prompt},
                {"role": "user", "content": input_text}
            ],
            response_format=Trial,
        )
        
        trial_generated_json = trial_response.choices[0].message.parsed

        print("TRIALS JSON: ")
        print(trial_generated_json) # debugging 3

        trial_pretty_json = trial_generated_json.json()

        return farm_pretty_json, interactions_pretty_json, trial_pretty_json

    except ValidationError as e:
        # Keep the return arity consistent with the success path (three values)
        error = {"error": str(e)}
        return error, error, error
    except Exception as e:
        error = {"error": "Failed to generate valid JSON. " + str(e)}
        return error, error, error

# This is for the step-wise JSON creation
def generate_json_pieces(input_data, parameters): 
    """
    This is primarily for one of the flippers, which allows each individual JSON section to be created individually, then concatenates them all together. 
    It is proposed that perhaps the individual calls to the model will be more robust than giving the model all the data at once. 

    Args: 
        input_data: (dict) The input data, preprocessed, from the user. Aka what will fill in the JSON
            input_data["input_text"] = (str) the preprocessed input text
            input_data["input_context"] = (str) depending on levers, empty or what is put in front of the prompt
            input_data["input_text_pieces"] = (dict) containing the individual split up prompt pieces: field_data_input, planting_data_input, log_data_input, soil_data_input, yield_data_input
            
        parameters: (dict) All of the individual parameters and "flippers" 
            parameters["model_version"] = (str) what model should be used 
            parameters["chaining"] = (bool) whether or not the preprocessed input context should be chained (given to multiple models)
            parameters["context_pre_prompt"], parameters["summary_pre_prompt"], parameters["conversation_pre_prompt"], parameters["example_pre_prompt"] = (str) all of the pre-prompts, separated
            parameters["combined_pre_prompt"] = (str) concatenated individual pre-prompts
            parameters["additional_json_pieces_options"] = (str) "Explicit specific pieces" or "Parse from one big input text" to indicate whether it's many function calls on one input text or many function calls on smaller pieces of input texts

    Returns: 
        (str, str, str) Three combined JSON strings containing the data-filled schemas for Farm Activities, Interactions, and Trials
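
    Example (hypothetical values for illustration; only the keys this function reads):
        input_data["input_text_pieces"] = {
            "field_data_input": "The north field is 12 acres on a gentle slope...",
            "planting_data_input": "Corn planted May 3rd at 32,000 seeds per acre...",
            "log_data_input": "...", "soil_data_input": "...", "yield_data_input": "...",
            "interaction_data_input": "...", "person_data_input": "...",
            "trial_data_input": "...", "treatment_data_input": "...",
        }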
    """
    print("Generating JSON Pieces!")
    
    print("INPUT DATA")
    print(input_data)
    print("PARAMS")
    print(parameters)
    
    specification = input_data["input_text"]
    model_version = parameters["model_version"]

    print("Specification and Model Version")
    print(specification)
    print(model_version)
    
    if parameters["pre_prompt"] and parameters["chaining"]:
        print("Pre prompt is true")
        field_data_input = input_data["input_text_pieces"]["pre_processed_pieces"]["field_data_input"]
        planting_data_input = input_data["input_text_pieces"]["pre_processed_pieces"]["planting_data_input"]
        log_data_input = input_data["input_text_pieces"]["pre_processed_pieces"]["log_data_input"]
        soil_data_input = input_data["input_text_pieces"]["pre_processed_pieces"]["soil_data_input"] 
        yield_data_input = input_data["input_text_pieces"]["pre_processed_pieces"]["yield_data_input"] 

        interaction_data_input = input_data["input_text_pieces"]["pre_processed_pieces"]["interaction_data_input"]
        person_data_input = input_data["input_text_pieces"]["pre_processed_pieces"]["person_data_input"]

        trial_data_input = input_data["input_text_pieces"]["pre_processed_pieces"]["trial_data_input"]
        treatment_data_input = input_data["input_text_pieces"]["pre_processed_pieces"]["treatment_data_input"]
    else:
        print("Pre prompt is false")
        field_data_input = input_data["input_text_pieces"]["field_data_input"]
        planting_data_input = input_data["input_text_pieces"]["planting_data_input"]
        log_data_input = input_data["input_text_pieces"]["log_data_input"]
        soil_data_input = input_data["input_text_pieces"]["soil_data_input"] 
        yield_data_input = input_data["input_text_pieces"]["yield_data_input"] 

        interaction_data_input = input_data["input_text_pieces"]["interaction_data_input"]
        person_data_input = input_data["input_text_pieces"]["person_data_input"]

        trial_data_input = input_data["input_text_pieces"]["trial_data_input"]
        treatment_data_input = input_data["input_text_pieces"]["treatment_data_input"]


    # Fix these prompts for all 
    print("Setting prompts")
    field_prompt = "Extract the field information."
    plant_prompt = "Extract the planting information."
    log_prompt = "Extract the log information."
    soil_prompt = "Extract the soil information."
    yield_prompt = "Extract the yield information."

    interaction_prompt = "Extract the interaction information"
    person_prompt = "Please provide a list of people involved in this interaction, with each person's name, role, and any other relevant details."

    trial_prompt = "Extract the trial information"
    treatment_prompt = "Please provide a list of different treatments (strips or blocks with the same conditions applied) performed by the partner."


    if parameters["combined_pre_prompt"]:
        field_prompt = parameters["combined_pre_prompt"] + field_prompt
        plant_prompt = parameters["combined_pre_prompt"] + plant_prompt
        log_prompt = parameters["combined_pre_prompt"] + log_prompt
        soil_prompt = parameters["combined_pre_prompt"] + soil_prompt
        yield_prompt = parameters["combined_pre_prompt"] + yield_prompt

        interaction_prompt = parameters["combined_pre_prompt"] + interaction_prompt
        person_prompt = parameters["combined_pre_prompt"] + person_prompt

        trial_prompt = parameters["combined_pre_prompt"] + trial_prompt
        treatment_prompt = parameters["combined_pre_prompt"] + treatment_prompt

    try:
        # Call OpenAI API to generate structured output based on prompt
        print("Getting all responses in pieces, starting with field response")

        # All of this is for the first schema for farm activities 
        print("Field prompt")
        print(field_prompt)

        print("Field data input")
        print(field_data_input)
        
        field_response = client.beta.chat.completions.parse(
            model=model_version,  # Use GPT model that supports structured output
            messages=[
                {"role": "system", "content": field_prompt},
                {"role": "user", "content": field_data_input}
            ],
            response_format=FarmActivitiesLite,
        )

        field_generated_json = field_response.choices[0].message.parsed
        print(type(field_generated_json))

        
        print("FIELD JSON: ")
        field_pretty_json = field_generated_json.dict()
        print(field_pretty_json) # debugging

        
        plant_response = client.beta.chat.completions.parse(
            model=model_version,  # Use GPT model that supports structured output
            messages=[
                {"role": "system", "content": plant_prompt},
                {"role": "user", "content": planting_data_input}
            ],
            response_format=PlantingLite,
        )

        plant_generated_json = plant_response.choices[0].message.parsed

        
        print("PLANT JSON: ")
        plant_pretty_json = plant_generated_json.dict()
        print(plant_pretty_json) # debugging

        log_response = client.beta.chat.completions.parse(
            model=model_version,  # Use GPT model that supports structured output
            messages=[
                {"role": "system", "content": log_prompt},
                {"role": "user", "content": log_data_input}
            ],
            response_format=Log,
        )

        log_generated_json = log_response.choices[0].message.parsed

        
        print("LOG JSON: ")
        log_pretty_json = log_generated_json.dict()
        print(log_pretty_json) # debugging

        soil_response = client.beta.chat.completions.parse(
            model=model_version,  # Use GPT model that supports structured output
            messages=[
                {"role": "system", "content": soil_prompt},
                {"role": "user", "content": soil_data_input}
            ],
            response_format=Soil,
        )

        soil_generated_json = soil_response.choices[0].message.parsed

        
        print("SOIL JSON: ")
        soil_pretty_json = soil_generated_json.dict()
        print(soil_pretty_json) # debugging

        yield_response = client.beta.chat.completions.parse(
            model=model_version,  # Use GPT model that supports structured output
            messages=[
                {"role": "system", "content": yield_prompt},
                {"role": "user", "content": yield_data_input}
            ],
            response_format=Yield,
        )

        yield_generated_json = yield_response.choices[0].message.parsed

        
        print("YIELD JSON: ")
        yield_pretty_json = yield_generated_json.dict()
        print(yield_pretty_json) # debugging

        plantings = {
            **plant_pretty_json,
            "logs": log_pretty_json,
            "soil": soil_pretty_json,
            "yield_": yield_pretty_json
        }
        
        farm_activities = {
            **field_pretty_json,
            "plantings": plantings
        }

        print("ADDED DICTS")
        print(farm_activities)
        print("FINAL JSON: ")
        final_pretty_farm_activity_json = json.dumps(farm_activities, indent=4)
        print(final_pretty_farm_activity_json)


        # This is for the second schema now, interactions
        print("Interaction prompt")
        print(interaction_prompt)

        print("Interaction data input")
        print(interaction_data_input)
        
        interaction_response = client.beta.chat.completions.parse(
            model=model_version,  # Use GPT model that supports structured output
            messages=[
                {"role": "system", "content": interaction_prompt},
                {"role": "user", "content": interaction_data_input}
            ],
            response_format=InteractionsLite,
        )

        interaction_generated_json = interaction_response.choices[0].message.parsed
        
        print("INTERACTION JSON: ")
        interaction_pretty_json = interaction_generated_json.dict()
        print(interaction_pretty_json) # debugging

        print("Person prompt")
        print(person_prompt)

        print("Person data input")
        print(person_data_input)
        
        person_response = client.beta.chat.completions.parse(
            model=model_version,  # Use GPT model that supports structured output
            messages=[
                {"role": "system", "content": person_prompt},
                {"role": "user", "content": person_data_input}
            ],
            response_format=Person,
        )

        person_generated_json = person_response.choices[0].message.parsed
        
        print("PERSON JSON: ")
        person_pretty_json = person_generated_json.dict()
        print(person_pretty_json) # debugging

        interactions = {
            **interaction_pretty_json,
            "people": person_generated_json
        }

        print("ADDED DICTS 2")
        print(interactions)
        print("FINAL JSON: ")
        final_pretty_interactions_json = json.dumps(interactions, indent=4)
        print(final_pretty_interactions_json)

        # This is for the third schema now, trials
        print("Trial prompt")
        print(trial_prompt)

        print("Trial data input")
        print(trial_data_input)
        
        trial_response = client.beta.chat.completions.parse(
            model=model_version,  # Use GPT model that supports structured output
            messages=[
                {"role": "system", "content": trial_prompt},
                {"role": "user", "content": trial_data_input}
            ],
            response_format=TrialLite,
        )

        trial_generated_json = trial_response.choices[0].message.parsed
        
        print("TRIAL JSON: ")
        trial_pretty_json = trial_generated_json.dict()
        print(trial_pretty_json) # debugging

        print("Treatment prompt")
        print(treatment_prompt)

        print("Treatment data input")
        print(treatment_data_input)
        
        treatment_response = client.beta.chat.completions.parse(
            model=model_version,  # Use GPT model that supports structured output
            messages=[
                {"role": "system", "content": treatment_prompt},
                {"role": "user", "content": treatment_data_input}
            ],
            response_format=Treatment,
        )

        treatment_generated_json = treatment_response.choices[0].message.parsed
        
        print("TREATMENT JSON: ")
        treatment_pretty_json = treatment_generated_json.dict()
        print(treatment_pretty_json) # debugging

        trials = {
            **trial_pretty_json,
            "treatments": treatment_generated_json
        }

        print("ADDED DICTS 3")
        print(trials)
        print("TREATMENT JSON: ")
        final_pretty_trials_json = json.dumps(trials, indent=4)
        print(final_pretty_trials_json)

        return final_pretty_farm_activity_json, final_pretty_interactions_json, final_pretty_trials_json
    except Exception as e:
        # Keep the return arity consistent with the success path (three values)
        error = {"error": "Failed to generate valid JSON. " + str(e)}
        return error, error, error
    

def pre_processing(input_data, parameters):
    """
    In the event there's a pre-prompt, process the pre-prompts and input text accordingly

    Args: 
        input_data: (dict) The input data, preprocessed, from the user. Aka what will fill in the JSON
            input_data["input_text"] = (str) the preprocessed input text
            input_data["input_context"] = (str) depending on levers, empty or what is put in front of the prompt
            input_data["input_text_pieces"] = (dict) containing the individual split up prompt pieces: field_data_input, planting_data_input, log_data_input, soil_data_input, yield_data_input
            
        parameters: (dict) All of the individual parameters and "flippers" 
            parameters["model_version"] = (str) what model should be used 
            parameters["chaining"] = (bool) whether or not the preprocessed input context should be chained (given to multiple models)
            parameters["context_pre_prompt"], parameters["summary_pre_prompt"], parameters["conversation_pre_prompt"], parameters["example_pre_prompt"] = (str) all of the pre-prompts, separated
            parameters["combined_pre_prompt"] = (str) concatenated individual pre-prompts
            parameters["additional_json_pieces_options"] = (str) "Explicit specific pieces" or "Parse from one big input text" to indicate whether it's many function calls on one input text or many function calls on smaller pieces of input texts

    Returns: 
        (dict) input_data
        input_data["input_context"] = (str) the text which should be used as context or "EMPTY" to indicate there is no context
        input_data["input_text"] = (str) input text
    """
    print("Starting preprocessing")
    if input_data["stepwise_json_creation"][0] == "stepwisejsoncreation":
        print("Stepwise Creation")
        input_data["input_text_pieces"]["pre_processed_pieces"] = {}
        
        if parameters["chaining"]:
            print("Chaining")
            for text_label, text_body in input_data["input_text_pieces"].items():
                if 'data_input' in text_label:
                    for parameter_name, parameter_value in parameters.items():
                        if 'pre_prompt' in parameter_name and parameter_value and not isinstance(parameter_value, bool) and text_body:
                            print("Text Label")
                            print(text_label)
                            print("Prompt followed by data entered")
                            print(parameter_value)
                            print(text_body)
                            response = client.chat.completions.create(
                                            model=parameters["model_version"],
                                            messages=[
                                                {"role": "system", "content": parameter_value},
                                                {"role": "user", "content": text_body}
                                            ]
                                        )
                                        
                            response_text = response.choices[0].message.content
                            print("Response text")
                            print(response_text)
                            
                            input_data["input_text_pieces"]["pre_processed_pieces"][text_label] = response_text

        return input_data
        
    if input_data["stepwise_json_creation"][0] == "singlejsoncreation":    
        if parameters["chaining"]:
    
            input_text = input_data["input_text"]
            pre_processing_list = [parameters["context_pre_prompt"], parameters["summary_pre_prompt"], parameters["conversation_pre_prompt"], parameters["example_pre_prompt"]]
    
            print("PreProcessingList")
            print(pre_processing_list)
            for pre_prompt in pre_processing_list:
                try:
                    print("Pre-Processing: ")
                    if pre_prompt:
                        print("Prompt: ")
                        print(pre_prompt)
                        print("Input Text: ")
                        print(input_text)
                        print("Model: ")
                        print(parameters["model_version"])
                        
                        response = client.chat.completions.create(
                            model=parameters["model_version"],
                            messages=[
                                {"role": "system", "content": pre_prompt},
                                {"role": "user", "content": input_text}
                            ]
                        )
                        
                        response_text = response.choices[0].message.content
                        
                        print("Response Text: ")
                        print(response_text)
                        
                        input_text = response_text
                    
                except Exception as e:
                    print(f"Failed to parse response as JSON. Error was: {e}")

            input_data["input_text"] = input_text
            return input_data

    # If chaining was off, the combined pre-prompt is applied later (at JSON-generation
    # time), so there is nothing to do here; return the input unchanged.
    return input_data

    
def process_specifications(input_data, parameters):
    """
    Once the parameters and data are processed, do the pre-processing and then generate JSONs

    Args: 
        input_data: (dict) The input data, preprocessed, from the user. Aka what will fill in the JSON
            input_data["input_text"] = (str) the preprocessed input text
            input_data["input_context"] = (str) depending on levers, empty or what is put in front of the prompt
            input_data["input_text_pieces"] = (dict) containing the individual split up prompt pieces: field_data_input, planting_data_input, log_data_input, soil_data_input, yield_data_input
            
        parameters: (dict) All of the individual parameters and "flippers" 
            parameters["pre_prompt"] = (bool) whether or not there is a pre-prompt to process through pre_processing()
            parameters["model_version"] = (str) what model should be used 
            parameters["chaining"] = (bool) whether or not the preprocessed input context should be chained (given to multiple models)
            parameters["context_pre_prompt"], parameters["summary_pre_prompt"], parameters["conversation_pre_prompt"], parameters["example_pre_prompt"] = (str) all of the pre-prompts, separated
            parameters["combined_pre_prompt"] = (str) concatenated individual pre-prompts
            parameters["additional_json_pieces_options"] = (str) "Explicit specific pieces" or "Parse from one big input text" to indicate whether it's many function calls on one input text or many function calls on smaller pieces of input texts

    Returns: 
        3 processed data-filled JSON objects: farm_pretty_json, interactions_pretty_json, trial_pretty_json
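
    Example (hypothetical wiring, for illustration; survey_json is the payload from the surveystack API):
        input_data = parse_survey_stack_data(survey_json)
        parameters = parse_survey_stack_parameters(survey_json)
        farm_json, interactions_json, trial_json = process_specifications(input_data, parameters)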
    """
    print("Processing specifications")
    print("Here is also the input data")
    print(input_data)
    print("Here is also the parameters")
    print(parameters)
    
    # here is where parsing and other things will happen before 
    if input_data["stepwise_json_creation"][0] == "stepwisejsoncreation":
        print("You are continuing with stepwise json creation")
        if parameters["pre_prompt"] == True:
            print("You are continuing with pre_prompt processing")
            processed_input = pre_processing(input_data, parameters)
        else: 
            print("You have elsed into no pre-processing")
            processed_input = input_data
        return generate_json_pieces(processed_input, parameters)
    elif input_data["stepwise_json_creation"][0] == "singlejsoncreation":
        print("You are elifing into single json creation")
        #input_data["input_context"] = "EMPTY"
        if parameters["pre_prompt"] == True:
            print("You are preprocessing now")
            processed_input = pre_processing(input_data, parameters)
        else: 
            print("You do not have any preprocessing now")
            processed_input = input_data
        return generate_json(processed_input, parameters)
    
    
def parse_survey_stack_parameters(data):
    """
    Parse the incoming parameters from the parameter survey

    Args: 
        data: (json) JSON retrieved from surveystack API after retrieving survey info/details

    Returns:
        processed_data (dict)
            processed_data["pre_prompt"] = (bool) whether or not there is a pre-prompt to process through pre_processing()
            processed_data["model_version"] = (str) what model should be used 
            processed_data["chaining"] = (bool) whether or not the preprocessed input context should be chained (given to multiple models)
            processed_data["context_pre_prompt"], parameters["summary_pre_prompt"], parameters["conversation_pre_prompt"], parameters["example_pre_prompt"] = (str) all of the pre-prompts, separated
            processed_data["combined_pre_prompt"] = (str) concatenated individual pre-prompts
            processed_data["additional_json_pieces_options"] = (str) "Explicit specific pieces" or "Parse from one big input text" to indicate whether it's many function calls on one input text or many function calls on smaller pieces of input texts
    """
    processed_data = {}

    processed_data["model_version"] = data[0]['data']['modelversion']['value'][0]

    print("DATA: ")
    print(data)
    
    try:

        print("Extracting parameters")

        pre_prompt_parameters = data[0]['data']['group_2']

        if pre_prompt_parameters['preprompt']['value'][0] == 'continue_preprompts':
            processed_data["pre_prompt"] = True
    
            # Accessing context and other prompts, with defaults in case they are None
            processed_data["context_pre_prompt"] = pre_prompt_parameters.get('contextpreprompt', {}).get('value', None)
            processed_data["summary_pre_prompt"] = pre_prompt_parameters.get('summarypreprompt', {}).get('value', None)
            processed_data["conversation_pre_prompt"] = pre_prompt_parameters.get('conversationpreprompt', {}).get('value', None)
            processed_data["example_pre_prompt"] = pre_prompt_parameters.get('examplepreprompt', {}).get('value', None)
            
            # Check if chaining is set to "yes" or "no"
            chaining_value = pre_prompt_parameters.get('prepromptchaining', {}).get('value', [None])[0]
            
            if chaining_value == "no":
                # Combine prompts if chaining is "no"
                combined_prompt = " ".join(
                    filter(None, [
                        processed_data["context_pre_prompt"], 
                        processed_data["summary_pre_prompt"], 
                        processed_data["conversation_pre_prompt"], 
                        processed_data["example_pre_prompt"]
                    ])
                )
                processed_data["chaining"] = False
                processed_data["combined_pre_prompt"] = combined_prompt
            else:
                # Set combined_pre_prompt to None if chaining is enabled
                processed_data["chaining"] = True
                processed_data["combined_pre_prompt"] = None
        else:
            # Set fields to None if preprompt is not "continue_preprompts"
            processed_data["pre_prompt"] = False
            processed_data["context_pre_prompt"] = None
            processed_data["summary_pre_prompt"] = None
            processed_data["conversation_pre_prompt"] = None
            processed_data["example_pre_prompt"] = None
            processed_data["chaining"] = False
            processed_data["combined_pre_prompt"] = None
            
    except Exception as e:
        print(f"An error occurred: {e}")

    print("Done Extracting parameters:")
    print(str(processed_data))
    return processed_data

def parse_survey_stack_data(data):
    """
    Parse the incoming data from the survey stack survey

    Args: 
        data: (json) JSON retrieved from surveystack API after retrieving survey info/details

    Returns: 
        processed_data (dict)
            processed_data["stepwise_json_creation"] = (list) which JSON-creation mode was selected
            processed_data["input_text"] = (str) the raw input text, or "EMPTY" in stepwise mode
            processed_data["input_text_pieces"] = (dict) the individual input text pieces, or "EMPTY" placeholders in single-JSON mode
    """
    print("PROCESSING SURVEY STACK DATA")
    processed_data = {}

    print("JUST PRINTING OUT THE DATA FOR YA")

    print(data)

    
    processed_data["stepwise_json_creation"] = data[0]['data']['stepwisejsoncreation']['value']
    print("STEPWISE?: " + str(processed_data["stepwise_json_creation"]))

    if processed_data["stepwise_json_creation"][0] == "stepwisejsoncreation":
        print("IN THE STEP")
        farm_management_inputs = data[0]['data']['group_4']
        print("FARM MANAGEMENT INPUTS" + str(farm_management_inputs))
        
        processed_data["input_text_pieces"] = {}
        processed_data["input_text_pieces"]["field_data_input"] = farm_management_inputs.get('field_data_input', {}).get('value', None)
        processed_data["input_text_pieces"]["planting_data_input"] = farm_management_inputs.get('planting_data_input', {}).get('value', None)
        processed_data["input_text_pieces"]["log_data_input"] = farm_management_inputs.get('log_data_input', {}).get('value', None)
        processed_data["input_text_pieces"]["soil_data_input"] = farm_management_inputs.get('soil_data_input', {}).get('value', None)
        processed_data["input_text_pieces"]["yield_data_input"] = farm_management_inputs.get('yield_data_input', {}).get('value', None)
        processed_data["input_text"] = "EMPTY"

        print("NEXT SCHEMA INPUTS")
        interactions_inputs = data[0]['data']['group_5']
        print("INTERACTIONS INPUTS" + str(interactions_inputs))
        processed_data["input_text_pieces"]["interaction_data_input"] = interactions_inputs.get('interaction_data_input', {}).get('value', None)
        processed_data["input_text_pieces"]["person_data_input"] = interactions_inputs.get('person_data_input', {}).get('value', None)

        print("NEXT SCHEMA INPUTS 2")
        trials_inputs = data[0]['data']['group_6']
        print("TRIALS INPUTS" + str(trials_inputs))
        processed_data["input_text_pieces"]["trial_data_input"] = trials_inputs.get('trial_data_input', {}).get('value', None)
        processed_data["input_text_pieces"]["treatment_data_input"] = trials_inputs.get('treatment_data_input', {}).get('value', None)

        
    elif processed_data["stepwise_json_creation"][0] == "singlejsoncreation":
        print("IN THE SINGLE")
        processed_data["input_text"] = data[0]['data']['onelonginputtext']['value']
        print(processed_data["input_text"])
        
        processed_data["input_text_pieces"] = {}
        processed_data["input_text_pieces"]["field_data_input"] = "EMPTY"
        processed_data["input_text_pieces"]["planting_data_input"] = "EMPTY"
        processed_data["input_text_pieces"]["log_data_input"] = "EMPTY"
        processed_data["input_text_pieces"]["soil_data_input"] = "EMPTY"
        processed_data["input_text_pieces"]["yield_data_input"] = "EMPTY"

        processed_data["input_text_pieces"]["interaction_data_input"] = "EMPTY"
        processed_data["input_text_pieces"]["person_data_input"] = "EMPTY"

        processed_data["input_text_pieces"]["trial_data_input"] = "EMPTY"
        processed_data["input_text_pieces"]["treatment_data_input"] = "EMPTY"

    print("RETURNING DATA")
    print(processed_data)
        
    return processed_data
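

# A minimal end-to-end sketch, kept commented out so importing this module never
# triggers API calls. `survey_json` is hypothetical here; the real payload is
# retrieved from the surveystack API elsewhere.
# if __name__ == "__main__":
#     survey_json = [...]  # fetched from surveystack, shape as in the docstrings above
#     input_data = parse_survey_stack_data(survey_json)
#     parameters = parse_survey_stack_parameters(survey_json)
#     farm_json, interactions_json, trial_json = process_specifications(input_data, parameters)
#     print(farm_json, interactions_json, trial_json)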