Leon4gr45 committed on
Commit
3bde163
·
verified ·
1 Parent(s): 7c9579c

Upload folder using huggingface_hub

Browse files
app/api/generate-ai-code-stream/route.ts CHANGED
@@ -1,5 +1,5 @@
1
  import { NextRequest, NextResponse } from 'next/server';
2
- import { streamText } from 'ai';
3
  import type { SandboxState } from '@/types/sandbox';
4
  import { selectFilesForEdit, getFileContents, formatFilesForAI } from '@/lib/context-selector';
5
  import { executeSearchPlan, formatSearchResultsForAI, selectTargetFile } from '@/lib/file-search-executor';
@@ -1188,6 +1188,7 @@ MORPH FAST APPLY MODE (EDIT-ONLY):
1188
  console.log(`[generate-ai-code-stream] Using provider for model: ${actualModel}`);
1189
  console.log(`[generate-ai-code-stream] Model string: ${model}`);
1190
 
 
1191
  // Make streaming API call with appropriate provider
1192
  const streamOptions: any = {
1193
  model: modelProvider(actualModel),
@@ -1786,6 +1787,116 @@ Provide the complete file content without any truncation. Include all necessary
1786
 
1787
  console.log('[generate-ai-code-stream] Updated conversation history with edit:', editRecord);
1788
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1789
 
1790
  } catch (error) {
1791
  console.error('[generate-ai-code-stream] Stream processing error:', error);
 
1
  import { NextRequest, NextResponse } from 'next/server';
2
+ import { streamText, generateText } from 'ai';
3
  import type { SandboxState } from '@/types/sandbox';
4
  import { selectFilesForEdit, getFileContents, formatFilesForAI } from '@/lib/context-selector';
5
  import { executeSearchPlan, formatSearchResultsForAI, selectTargetFile } from '@/lib/file-search-executor';
 
1188
  console.log(`[generate-ai-code-stream] Using provider for model: ${actualModel}`);
1189
  console.log(`[generate-ai-code-stream] Model string: ${model}`);
1190
 
1191
+ if (isEdit) {
1192
  // Make streaming API call with appropriate provider
1193
  const streamOptions: any = {
1194
  model: modelProvider(actualModel),
 
1787
 
1788
  console.log('[generate-ai-code-stream] Updated conversation history with edit:', editRecord);
1789
  }
1790
+ } else {
1791
+ // New logic for initial generation (non-edit mode)
1792
+ await sendProgress({ type: 'status', message: 'Creating file generation plan...' });
1793
+ const planPrompt = `Based on the user's request for a new web application, provide a list of files to create.
1794
+ User Request: "${prompt}"
1795
+
1796
+ Respond ONLY with a JSON array of strings, where each string is a file path.
1797
+ Example: ["src/index.css", "src/App.jsx", "src/components/Header.jsx", "src/components/Hero.jsx", "src/components/Footer.jsx"]`;
1798
+
1799
+ const { text: filePlanJson } = await generateText({
1800
+ model: modelProvider(actualModel),
1801
+ system: "You are a senior software architect. Your task is to plan the file structure for a new React application based on a user's request. You only respond with a JSON array of file paths.",
1802
+ prompt: planPrompt,
1803
+ temperature: 0.2, // Low temp for planning
1804
+ });
1805
+
1806
+ let filePlan: string[];
1807
+ try {
1808
+ // Attempt to parse the JSON. Handle cases where the AI might return markdown
1809
+ const cleanedJson = filePlanJson.replace(/```json\n|```/g, '').trim();
1810
+ filePlan = JSON.parse(cleanedJson);
1811
+ console.log('[generate-ai-code-stream] Parsed file plan:', filePlan);
1812
+ } catch (e) {
1813
+ console.error("Failed to parse file plan:", filePlanJson);
1814
+ await sendProgress({ type: 'error', message: 'Failed to create a file generation plan. The AI returned an invalid format.' });
1815
+ throw new Error("Invalid file plan format");
1816
+ }
1817
+
1818
+ await sendProgress({ type: 'plan', files: filePlan });
1819
+
1820
+ let generatedCode = '';
1821
+ let componentCount = 0;
1822
+ const generatedFilesContent: { [key: string]: string } = {};
1823
+
1824
+ for (const filePath of filePlan) {
1825
+ await sendProgress({ type: 'status', message: `Generating ${filePath}...` });
1826
+
1827
+ // Accumulate context from previously generated files
1828
+ let accumulatedContext = '';
1829
+ if (Object.keys(generatedFilesContent).length > 0) {
1830
+ accumulatedContext += "\n\nPreviously generated files for context:\n";
1831
+ for (const [path, content] of Object.entries(generatedFilesContent)) {
1832
+ accumulatedContext += `<file path="${path}">\n${content}\n</file>\n`;
1833
+ }
1834
+ }
1835
+
1836
+ const fileGenPrompt = `The overall user request is to build a new web application: "${prompt}".
1837
+ The full planned application file structure is: ${JSON.stringify(filePlan)}.
1838
+ ${accumulatedContext}
1839
+ Your current task is to generate the complete, production-ready code for the following file ONLY:
1840
+ File: ${filePath}
1841
+
1842
+ CRITICAL INSTRUCTIONS:
1843
+ 1. Generate ONLY the code for the specified file path.
1844
+ 2. The file must be complete, with all necessary imports and code.
1845
+ 3. Do NOT include any explanations, markdown, or XML tags. Your entire response will be the content of this single file.
1846
+ 4. Adhere to all the rules specified in the system prompt (Tailwind usage, no inline styles, etc.).
1847
+ 5. Make sure you are generating code that aligns with the other files in the plan (e.g., if App.jsx imports Header.jsx, the Header.jsx you generate should be a valid React component).`;
1848
+
1849
+ let fileContent = '';
1850
+ const fileResult = await streamText({
1851
+ model: modelProvider(actualModel),
1852
+ messages: [
1853
+ { role: 'system', content: systemPrompt },
1854
+ { role: 'user', content: fileGenPrompt }
1855
+ ],
1856
+ maxTokens: 4096, // Smaller token limit per file
1857
+ temperature: 0.7
1858
+ });
1859
+
1860
+ const fileTagStart = `<file path="${filePath}">`;
1861
+ generatedCode += fileTagStart;
1862
+ await sendProgress({ type: 'stream', text: fileTagStart, raw: true });
1863
+
1864
+ for await (const textPart of fileResult.textStream) {
1865
+ fileContent += textPart;
1866
+ generatedCode += textPart;
1867
+ await sendProgress({ type: 'stream', text: textPart, raw: true });
1868
+ }
1869
+ generatedFilesContent[filePath] = fileContent;
1870
+
1871
+ const fileTagEnd = `</file>`;
1872
+ generatedCode += fileTagEnd;
1873
+ await sendProgress({ type: 'stream', text: fileTagEnd, raw: true });
1874
+
1875
+
1876
+ if (filePath.includes('components/')) {
1877
+ componentCount++;
1878
+ const componentName = filePath.split('/').pop()?.replace('.jsx', '') || 'Component';
1879
+ await sendProgress({
1880
+ type: 'component',
1881
+ name: componentName,
1882
+ path: filePath,
1883
+ index: componentCount
1884
+ });
1885
+ }
1886
+ }
1887
+
1888
+ // Finalize
1889
+ await sendProgress({
1890
+ type: 'complete',
1891
+ generatedCode,
1892
+ explanation: 'Application generated successfully.',
1893
+ files: filePlan.length,
1894
+ components: componentCount,
1895
+ model,
1896
+ packagesToInstall: undefined,
1897
+ warnings: undefined
1898
+ });
1899
+ }
1900
 
1901
  } catch (error) {
1902
  console.error('[generate-ai-code-stream] Stream processing error:', error);