JatinAutonomousLabs committed on
Commit
8dc32dd
·
verified ·
1 Parent(s): 89ef3c5

Update graph.py

Browse files
Files changed (1) hide show
  1. graph.py +24 -22
graph.py CHANGED
@@ -574,6 +574,29 @@ CRITICAL REQUIREMENTS:
574
  # Complete code here
575
  Generate complete repository:"""
576
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
577
  response = llm.invoke(enhanced_prompt)
578
  llm_text = getattr(response, "content", "") or ""
579
 
@@ -673,28 +696,7 @@ CRITICAL REQUIREMENTS:
673
  "status_update": f"Repository created ({len(repo_files)} files)"
674
  }
675
 
676
- # OTHER ARTIFACT TYPES
677
- enhanced_prompt = f"""Create HIGH-QUALITY {exp_type} artifact.
678
- {full_context}
679
-
680
- GOAL: {goal}
681
-
682
- REQUIREMENTS:
683
-
684
- Use ALL specific details from request
685
-
686
- PRODUCTION-READY, COMPLETE content (NO placeholders)
687
-
688
- ACTUAL data, REALISTIC examples, WORKING code
689
-
690
- For notebooks: markdown + executable code + visualizations
691
-
692
- For scripts: error handling + docs + real logic
693
-
694
- For documents: substantive detailed content
695
-
696
- Generate complete content for '{exp_type}' with proper code fences."""
697
-
698
  response = llm.invoke(enhanced_prompt)
699
  llm_text = getattr(response, "content", "") or ""
700
  results = {"success": False, "paths": {}, "stderr": "", "stdout": "", "context_used": len(full_context)}
 
574
  # Complete code here
575
  Generate complete repository:"""
576
 
577
+ # OTHER ARTIFACT TYPES
578
+ enhanced_prompt = f"""Create HIGH-QUALITY {exp_type} artifact.
579
+ {full_context}
580
+
581
+ GOAL: {goal}
582
+
583
+ REQUIREMENTS:
584
+
585
+ Use ALL specific details from request
586
+
587
+ PRODUCTION-READY, COMPLETE content (NO placeholders)
588
+
589
+ ACTUAL data, REALISTIC examples, WORKING code
590
+
591
+ For notebooks: markdown + executable code + visualizations
592
+
593
+ For scripts: error handling + docs + real logic
594
+
595
+ For documents: substantive detailed content
596
+
597
+ Generate complete content for '{exp_type}' with proper code fences."""
598
+
599
+
600
  response = llm.invoke(enhanced_prompt)
601
  llm_text = getattr(response, "content", "") or ""
602
 
 
696
  "status_update": f"Repository created ({len(repo_files)} files)"
697
  }
698
 
699
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
700
  response = llm.invoke(enhanced_prompt)
701
  llm_text = getattr(response, "content", "") or ""
702
  results = {"success": False, "paths": {}, "stderr": "", "stdout": "", "context_used": len(full_context)}