jainarham committed on
Commit
084fb33
·
verified ·
1 Parent(s): 72d268d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +68 -111
app.py CHANGED
@@ -1,6 +1,5 @@
1
  """
2
- Main FastAPI Application for Text-to-3D Generator
3
- Completely Free - No Paid APIs
4
  """
5
 
6
  from fastapi import FastAPI, HTTPException, Request
@@ -12,23 +11,21 @@ import uvicorn
12
  import base64
13
  import uuid
14
  import logging
 
15
 
16
  from nlp_processor import NLPProcessor
17
  from model_generator import ModelGenerator
18
  from context_memory import ContextMemory
19
 
20
- # Configure logging
21
  logging.basicConfig(level=logging.INFO)
22
  logger = logging.getLogger(__name__)
23
 
24
- # Initialize FastAPI app
25
  app = FastAPI(
26
- title="Free Text-to-3D API",
27
- description="Convert natural language to 3D models - 100% Free",
28
- version="1.0.0"
29
  )
30
 
31
- # CORS middleware for frontend communication
32
  app.add_middleware(
33
  CORSMiddleware,
34
  allow_origins=["*"],
@@ -37,13 +34,12 @@ app.add_middleware(
37
  allow_headers=["*"],
38
  )
39
 
40
- # Initialize modules
41
- nlp_processor = NLPProcessor()
42
- model_generator = ModelGenerator()
43
- context_memory = ContextMemory()
44
 
45
 
46
- # Request/Response Models
47
  class GenerateRequest(BaseModel):
48
  prompt: str
49
  session_id: Optional[str] = None
@@ -58,157 +54,118 @@ class GenerateResponse(BaseModel):
58
  model_obj: Optional[str] = None
59
  model_params: Optional[Dict[str, Any]] = None
60
  interpretation: Optional[str] = None
 
61
 
62
 
63
- class HealthResponse(BaseModel):
64
- status: str
65
- version: str
66
-
67
-
68
- @app.get("/", response_model=HealthResponse)
69
  async def root():
70
- """Health check endpoint"""
71
- return HealthResponse(status="healthy", version="1.0.0")
72
 
73
 
74
- @app.get("/health", response_model=HealthResponse)
75
- async def health_check():
76
- """Health check endpoint"""
77
- return HealthResponse(status="healthy", version="1.0.0")
78
 
79
 
80
  @app.post("/generate", response_model=GenerateResponse)
81
- async def generate_model(request: GenerateRequest):
82
- """
83
- Generate or refine a 3D model from text prompt
84
- """
85
  try:
86
- # Generate session ID if not provided
87
  session_id = request.session_id or str(uuid.uuid4())
88
 
89
- logger.info(f"Processing request for session {session_id}: {request.prompt}")
90
 
91
- # Get existing context if this is a refinement
92
- existing_context = None
93
  if request.is_refinement:
94
- existing_context = context_memory.get_context(session_id)
95
 
96
- # Process the prompt with NLP
97
- nlp_result = nlp_processor.process_prompt(
98
- prompt=request.prompt,
99
- existing_context=existing_context
100
- )
101
 
102
- if not nlp_result["success"]:
103
  return GenerateResponse(
104
  success=False,
105
  session_id=session_id,
106
- message=nlp_result.get("error", "Failed to understand prompt"),
107
- interpretation=nlp_result.get("interpretation", "")
108
  )
109
 
110
- # Generate the 3D model
111
  model_params = nlp_result["model_params"]
112
- model_result = model_generator.generate(model_params)
113
 
114
- if not model_result["success"]:
115
  return GenerateResponse(
116
  success=False,
117
  session_id=session_id,
118
- message=model_result.get("error", "Failed to generate model"),
119
- interpretation=nlp_result.get("interpretation", "")
120
  )
121
 
122
- # Save context for future refinements
123
- context_memory.save_context(session_id, {
124
- "prompt_history": context_memory.get_context(session_id, {}).get("prompt_history", []) + [request.prompt],
125
  "model_params": model_params,
126
  "nlp_result": nlp_result
127
  })
128
 
129
- # Encode models to base64
130
- glb_base64 = base64.b64encode(model_result["glb_data"]).decode("utf-8")
131
- obj_base64 = base64.b64encode(model_result["obj_data"]).decode("utf-8")
 
 
 
 
132
 
133
  return GenerateResponse(
134
  success=True,
135
  session_id=session_id,
136
  message="Model generated successfully",
137
- model_glb=glb_base64,
138
- model_obj=obj_base64,
139
  model_params=model_params,
140
- interpretation=nlp_result.get("interpretation", "")
 
141
  )
142
 
143
  except Exception as e:
144
- logger.error(f"Error generating model: {str(e)}")
145
  raise HTTPException(status_code=500, detail=str(e))
146
 
147
 
148
  @app.post("/refine", response_model=GenerateResponse)
149
- async def refine_model(request: GenerateRequest):
150
- """
151
- Refine an existing model based on new instructions
152
- """
153
  request.is_refinement = True
154
- return await generate_model(request)
155
-
156
-
157
- @app.get("/session/{session_id}")
158
- async def get_session(session_id: str):
159
- """Get session context"""
160
- context = context_memory.get_context(session_id)
161
- if context:
162
- return {"success": True, "context": context}
163
- return {"success": False, "message": "Session not found"}
164
-
165
-
166
- @app.delete("/session/{session_id}")
167
- async def clear_session(session_id: str):
168
- """Clear session context"""
169
- context_memory.clear_context(session_id)
170
- return {"success": True, "message": "Session cleared"}
171
 
172
 
173
- @app.get("/supported-shapes")
174
- async def get_supported_shapes():
175
- """Get list of supported shapes and operations"""
176
  return {
177
- "shapes": [
178
- "cube", "box", "sphere", "ball",
179
- "cylinder", "cone", "torus", "donut",
180
- "pyramid", "plane", "capsule"
181
- ],
182
- "colors": [
183
- "red", "green", "blue", "yellow", "orange",
184
- "purple", "pink", "white", "black", "gray",
185
- "brown", "cyan", "magenta", "gold", "silver"
186
- ],
187
- "sizes": ["tiny", "small", "medium", "large", "huge"],
188
- "operations": [
189
- "combine", "merge", "stack", "place on top",
190
- "next to", "inside", "rotate", "scale"
191
- ],
192
- "modifiers": [
193
- "smooth", "rough", "metallic", "glossy", "matte"
194
  ]
195
  }
196
 
197
 
198
- # Error handlers
199
  @app.exception_handler(Exception)
200
- async def global_exception_handler(request: Request, exc: Exception):
201
- logger.error(f"Global error: {str(exc)}")
202
- return JSONResponse(
203
- status_code=500,
204
- content={"success": False, "message": str(exc)}
205
- )
206
 
207
 
208
  if __name__ == "__main__":
209
- uvicorn.run(
210
- "app:app",
211
- host="0.0.0.0",
212
- port=7860,
213
- reload=True
214
- )
 
1
  """
2
+ Main FastAPI Application - Advanced Text-to-3D
 
3
  """
4
 
5
  from fastapi import FastAPI, HTTPException, Request
 
11
  import base64
12
  import uuid
13
  import logging
14
+ import time
15
 
16
  from nlp_processor import NLPProcessor
17
  from model_generator import ModelGenerator
18
  from context_memory import ContextMemory
19
 
 
20
  logging.basicConfig(level=logging.INFO)
21
  logger = logging.getLogger(__name__)
22
 
 
23
# ASGI application object; title/description/version feed the OpenAPI docs.
app = FastAPI(
    title="Advanced Text-to-3D API",
    description="Professional AI-powered text to 3D model generation",
    version="2.0.0",
)
28
 
 
29
  app.add_middleware(
30
  CORSMiddleware,
31
  allow_origins=["*"],
 
34
  allow_headers=["*"],
35
  )
36
 
37
# Module-level singletons shared by every request handler below.
nlp = NLPProcessor()          # turns a text prompt into structured model params
generator = ModelGenerator()  # turns model params into GLB/OBJ binary data
memory = ContextMemory()      # per-session state used by /refine
41
 
42
 
 
43
  class GenerateRequest(BaseModel):
44
  prompt: str
45
  session_id: Optional[str] = None
 
54
  model_obj: Optional[str] = None
55
  model_params: Optional[Dict[str, Any]] = None
56
  interpretation: Optional[str] = None
57
+ generation_time: Optional[float] = None
58
 
59
 
60
+ @app.get("/")
 
 
 
 
 
61
  async def root():
62
+ return {"status": "healthy", "version": "2.0.0", "service": "Advanced Text-to-3D"}
 
63
 
64
 
65
+ @app.get("/health")
66
+ async def health():
67
+ return {"status": "healthy", "version": "2.0.0"}
 
68
 
69
 
70
  @app.post("/generate", response_model=GenerateResponse)
71
+ async def generate(request: GenerateRequest):
72
+ start_time = time.time()
73
+
 
74
  try:
 
75
  session_id = request.session_id or str(uuid.uuid4())
76
 
77
+ logger.info(f"[{session_id[:8]}] Prompt: {request.prompt}")
78
 
79
+ # Get context for refinements
80
+ context = None
81
  if request.is_refinement:
82
+ context = memory.get_context(session_id)
83
 
84
+ # Process with NLP
85
+ nlp_result = nlp.process_prompt(request.prompt, context)
 
 
 
86
 
87
+ if not nlp_result.get("success"):
88
  return GenerateResponse(
89
  success=False,
90
  session_id=session_id,
91
+ message=nlp_result.get("error", "Failed to understand"),
92
+ interpretation=nlp_result.get("interpretation")
93
  )
94
 
95
+ # Generate 3D model
96
  model_params = nlp_result["model_params"]
97
+ model_result = generator.generate(model_params)
98
 
99
+ if not model_result.get("success"):
100
  return GenerateResponse(
101
  success=False,
102
  session_id=session_id,
103
+ message=model_result.get("error", "Generation failed"),
104
+ interpretation=nlp_result.get("interpretation")
105
  )
106
 
107
+ # Save context
108
+ memory.save_context(session_id, {
109
+ "prompt_history": memory.get_context(session_id, {}).get("prompt_history", []) + [request.prompt],
110
  "model_params": model_params,
111
  "nlp_result": nlp_result
112
  })
113
 
114
+ # Encode models
115
+ glb_b64 = base64.b64encode(model_result["glb_data"]).decode("utf-8")
116
+ obj_b64 = base64.b64encode(model_result["obj_data"]).decode("utf-8")
117
+
118
+ gen_time = time.time() - start_time
119
+
120
+ logger.info(f"[{session_id[:8]}] Generated in {gen_time:.2f}s")
121
 
122
  return GenerateResponse(
123
  success=True,
124
  session_id=session_id,
125
  message="Model generated successfully",
126
+ model_glb=glb_b64,
127
+ model_obj=obj_b64,
128
  model_params=model_params,
129
+ interpretation=nlp_result.get("interpretation"),
130
+ generation_time=round(gen_time, 2)
131
  )
132
 
133
  except Exception as e:
134
+ logger.error(f"Error: {str(e)}")
135
  raise HTTPException(status_code=500, detail=str(e))
136
 
137
 
138
  @app.post("/refine", response_model=GenerateResponse)
139
+ async def refine(request: GenerateRequest):
 
 
 
140
  request.is_refinement = True
141
+ return await generate(request)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
142
 
143
 
144
+ @app.get("/capabilities")
145
+ async def capabilities():
 
146
  return {
147
+ "shapes": ["cube", "sphere", "cylinder", "cone", "torus", "pyramid", "capsule", "plane"],
148
+ "arrangements": ["row", "column", "circle", "grid", "stack", "random", "increasing", "decreasing"],
149
+ "relations": ["on top of", "below", "next to", "left of", "right of", "in front of", "behind", "inside", "around"],
150
+ "materials": ["metallic", "matte", "glossy", "shiny", "rough", "smooth"],
151
+ "modifiers": ["bigger", "smaller", "taller", "shorter", "wider", "rotate", "flip"],
152
+ "examples": [
153
+ "3 red cubes",
154
+ "5 blue spheres arranged in a circle",
155
+ "A cone on top of a cylinder",
156
+ "Golden torus next to silver pyramid",
157
+ "6 cubes of increasing size",
158
+ "Stack a sphere on a cube on a cylinder",
159
+ "Grid of 9 colorful spheres"
 
 
 
 
160
  ]
161
  }
162
 
163
 
 
164
@app.exception_handler(Exception)
async def exception_handler(request: Request, exc: Exception):
    """Last-resort handler: log the full traceback, return a JSON 500.

    The previous f-string log recorded only str(exc); exc_info attaches the
    traceback so unhandled errors are actually debuggable from the logs.
    """
    logger.error("Unhandled: %s", exc, exc_info=exc)
    return JSONResponse(status_code=500, content={"success": False, "message": str(exc)})
 
 
 
168
 
169
 
170
  if __name__ == "__main__":
171
+ uvicorn.run("app:app", host="0.0.0.0", port=7860, reload=True)