DJHumanRPT committed on
Commit
e44cf0b
·
verified ·
1 Parent(s): b502ced

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +45 -4
app.py CHANGED
@@ -786,11 +786,19 @@ def generate_synthetic_outputs(
786
  # Format output variable information for the prompt
787
  output_vars_text = "\n".join(
788
  [
789
- f"Key: {var['name']}: {var['description']} (Type: {var['type']}) {'Options: '+str(var['options']) if var.get('options') else ''}"
790
  for var in output_vars
791
  ]
792
  )
793
 
 
 
 
 
 
 
 
 
794
  output_format = "{"
795
  for var in output_vars:
796
  output_format += f'"{var["name"]}": output, '
@@ -816,6 +824,9 @@ def generate_synthetic_outputs(
816
  generation_prompt = f"""
817
  You are generating synthetic output data based on the following input:
818
 
 
 
 
819
  INPUT DATA:
820
  {json.dumps(input_item, indent=2)}
821
 
@@ -833,7 +844,7 @@ The response must be valid JSON that can be parsed directly.
833
  """
834
 
835
  output_data = None
836
-
837
  for attempt in range(max_retries):
838
  try:
839
  response = client.chat.completions.create(
@@ -939,6 +950,7 @@ The response must be valid JSON that can be parsed directly.
939
 
940
  return results
941
 
 
942
  def suggest_variable_values_from_kb(
943
  variable_name, variable_type, knowledge_base, client, model="gpt-3.5-turbo"
944
  ):
@@ -1955,10 +1967,39 @@ with tab2:
1955
  )
1956
 
1957
  # Call LLM with the filled prompt
 
 
 
 
 
 
 
1958
  with st.spinner("Generating output..."):
1959
  model_selected = st.session_state.model
1960
- generated_output = call_llm(filled_prompt, model=model_selected)
1961
- st.session_state.generated_output = generated_output
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1962
 
1963
  # Display generated output
1964
  if (
 
786
  # Format output variable information for the prompt
787
  output_vars_text = "\n".join(
788
  [
789
+ f"- {var['name']}: {var['description']} (Type: {var['type']}) {'Options: '+str(var['options']) if var.get('options') else ''}"
790
  for var in output_vars
791
  ]
792
  )
793
 
794
+ input_vars = template_spec["input"]
795
+ input_vars_text = "\n".join(
796
+ [
797
+ f"- {var['name']}: {var['description']} (Type: {var['type']}) {'Options: '+str(var['options']) if var.get('options') else ''}"
798
+ for var in input_vars
799
+ ]
800
+ )
801
+
802
  output_format = "{"
803
  for var in output_vars:
804
  output_format += f'"{var["name"]}": output, '
 
824
  generation_prompt = f"""
825
  You are generating synthetic output data based on the following input:
826
 
827
+ DEFINITION OF INPUT VARIABLES:
828
+ {input_vars_text}
829
+
830
  INPUT DATA:
831
  {json.dumps(input_item, indent=2)}
832
 
 
844
  """
845
 
846
  output_data = None
847
+ print(generation_prompt)
848
  for attempt in range(max_retries):
849
  try:
850
  response = client.chat.completions.create(
 
950
 
951
  return results
952
 
953
+
954
  def suggest_variable_values_from_kb(
955
  variable_name, variable_type, knowledge_base, client, model="gpt-3.5-turbo"
956
  ):
 
1967
  )
1968
 
1969
  # Call LLM with the filled prompt
1970
+ # Create a single input data item from user inputs
1971
+ input_data = [st.session_state.user_inputs.copy()]
1972
+
1973
+ # Create a copy of the template spec
1974
+ template_spec_copy = st.session_state.template_spec.copy()
1975
+
1976
+ # Call generate_synthetic_outputs with the input data
1977
  with st.spinner("Generating output..."):
1978
  model_selected = st.session_state.model
1979
+ generated_outputs = generate_synthetic_outputs(
1980
+ template_spec_copy,
1981
+ input_data,
1982
+ st.session_state.knowledge_base,
1983
+ max_retries=3,
1984
+ )
1985
+
1986
+ # Extract the first output (since we only have one input)
1987
+ if generated_outputs and len(generated_outputs) > 0:
1988
+ # The output contains both input and output fields
1989
+ # We only want to display the output fields
1990
+ output_vars = [
1991
+ var["name"] for var in template_spec_copy["output"]
1992
+ ]
1993
+ output_data = {
1994
+ k: v
1995
+ for k, v in generated_outputs[0].items()
1996
+ if k in output_vars
1997
+ }
1998
+ st.session_state.generated_output = output_data
1999
+ else:
2000
+ st.session_state.generated_output = {
2001
+ "error": "Failed to generate output"
2002
+ }
2003
 
2004
  # Display generated output
2005
  if (