subashpoudel commited on
Commit
ef9fa4b
·
1 Parent(s): 38cf703

Converted code to OOP

Browse files
Files changed (47) hide show
  1. api/__pycache__/backup_prompts.cpython-312.pyc +0 -0
  2. api/backup_prompts.py +3 -0
  3. api/routers/__pycache__/brainstorm.cpython-312.pyc +0 -0
  4. api/routers/__pycache__/context_analysis.cpython-312.pyc +0 -0
  5. api/routers/__pycache__/generate_final_story.cpython-312.pyc +0 -0
  6. api/routers/__pycache__/generate_image.cpython-312.pyc +0 -0
  7. api/routers/__pycache__/human_idea_refining.cpython-312.pyc +0 -0
  8. api/routers/__pycache__/ideation.cpython-312.pyc +0 -0
  9. api/routers/__pycache__/orchestration.cpython-312.pyc +0 -0
  10. api/routers/brainstorm.py +9 -18
  11. api/routers/context_analysis.py +3 -9
  12. api/routers/generate_final_story.py +2 -2
  13. api/routers/generate_image.py +4 -3
  14. api/routers/human_idea_refining.py +1 -6
  15. api/routers/ideation.py +0 -1
  16. api/routers/orchestration.py +1 -4
  17. api/schemas/__init__.py +0 -0
  18. api/schemas/__pycache__/brainstorming.cpython-312.pyc +0 -0
  19. api/schemas/__pycache__/context_analysis.cpython-312.pyc +0 -0
  20. api/schemas/__pycache__/human_idea_refining.cpython-312.pyc +0 -0
  21. api/schemas/brainstorming.py +7 -0
  22. api/schemas/context_analysis.py +4 -0
  23. api/schemas/human_idea_refining.py +6 -0
  24. api/schemas/ideation.py +0 -0
  25. logs/access.log +90 -0
  26. logs/app.log +1 -0
  27. requirements.txt +1 -1
  28. src/genai/brainstroming_agent/__pycache__/agent.cpython-312.pyc +0 -0
  29. src/genai/brainstroming_agent/agent.py +16 -15
  30. src/genai/brainstroming_agent/utils/__pycache__/nodes.cpython-312.pyc +0 -0
  31. src/genai/brainstroming_agent/utils/__pycache__/prompts.cpython-312.pyc +0 -0
  32. src/genai/brainstroming_agent/utils/__pycache__/tools.cpython-312.pyc +0 -0
  33. src/genai/brainstroming_agent/utils/__pycache__/utils.cpython-312.pyc +0 -0
  34. src/genai/brainstroming_agent/utils/nodes.py +102 -108
  35. src/genai/brainstroming_agent/utils/prompts.py +1 -1
  36. src/genai/brainstroming_agent/utils/tools.py +41 -43
  37. src/genai/brainstroming_agent/utils/utils.py +48 -114
  38. src/genai/context_analysis_agent/__pycache__/agent.cpython-312.pyc +0 -0
  39. src/genai/context_analysis_agent/agent.py +6 -5
  40. src/genai/context_analysis_agent/utils/__pycache__/nodes.cpython-312.pyc +0 -0
  41. src/genai/context_analysis_agent/utils/__pycache__/utils.cpython-312.pyc +0 -0
  42. src/genai/context_analysis_agent/utils/nodes.py +13 -15
  43. src/genai/context_analysis_agent/utils/utils.py +40 -22
  44. src/genai/ideation_agent/utils/__pycache__/prompts.cpython-312.pyc +0 -0
  45. src/genai/ideation_agent/utils/prompts.py +99 -2
  46. src/genai/utils/__pycache__/models_loader.cpython-312.pyc +0 -0
  47. src/genai/utils/models_loader.py +2 -1
api/__pycache__/backup_prompts.cpython-312.pyc ADDED
Binary file (455 Bytes). View file
 
api/backup_prompts.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ image_gen_backup_prompt = "I don't have any story right now. Just use the business details for now."
2
+ brainstorm_backup_prompt = "I don't have any idea right now. Create your own **very creative** and **out of the box** video idea and generate the story for now."
3
+
api/routers/__pycache__/brainstorm.cpython-312.pyc CHANGED
Binary files a/api/routers/__pycache__/brainstorm.cpython-312.pyc and b/api/routers/__pycache__/brainstorm.cpython-312.pyc differ
 
api/routers/__pycache__/context_analysis.cpython-312.pyc CHANGED
Binary files a/api/routers/__pycache__/context_analysis.cpython-312.pyc and b/api/routers/__pycache__/context_analysis.cpython-312.pyc differ
 
api/routers/__pycache__/generate_final_story.cpython-312.pyc CHANGED
Binary files a/api/routers/__pycache__/generate_final_story.cpython-312.pyc and b/api/routers/__pycache__/generate_final_story.cpython-312.pyc differ
 
api/routers/__pycache__/generate_image.cpython-312.pyc CHANGED
Binary files a/api/routers/__pycache__/generate_image.cpython-312.pyc and b/api/routers/__pycache__/generate_image.cpython-312.pyc differ
 
api/routers/__pycache__/human_idea_refining.cpython-312.pyc CHANGED
Binary files a/api/routers/__pycache__/human_idea_refining.cpython-312.pyc and b/api/routers/__pycache__/human_idea_refining.cpython-312.pyc differ
 
api/routers/__pycache__/ideation.cpython-312.pyc CHANGED
Binary files a/api/routers/__pycache__/ideation.cpython-312.pyc and b/api/routers/__pycache__/ideation.cpython-312.pyc differ
 
api/routers/__pycache__/orchestration.cpython-312.pyc CHANGED
Binary files a/api/routers/__pycache__/orchestration.cpython-312.pyc and b/api/routers/__pycache__/orchestration.cpython-312.pyc differ
 
api/routers/brainstorm.py CHANGED
@@ -1,21 +1,14 @@
1
 
2
- import json
3
  from fastapi import APIRouter
4
  from fastapi.responses import StreamingResponse
5
  from api.stored_data import stored_data
6
- from pydantic import BaseModel
7
- from typing import Optional
8
- from src.genai.brainstroming_agent.agent import brainstroming_graph
9
 
10
  router=APIRouter()
11
- brainstrom_graph = brainstroming_graph()
12
-
13
-
14
- class BrainstormRequest(BaseModel):
15
- preferred_topics: Optional[list] = []
16
- image_base64_list: Optional[list] = []
17
- thread_id: Optional[str]="default-session"
18
-
19
 
20
  @router.post("/brainstorm")
21
  def brainstorming_endpoint(request: BrainstormRequest):
@@ -24,14 +17,11 @@ def brainstorming_endpoint(request: BrainstormRequest):
24
  if stored_data.get('refined_ideation')
25
  else [str(stored_data['final_ideation'])]
26
  if stored_data.get('final_ideation')
27
- else [
28
- "I don't have any idea right now. Create your own **very creative** and **out of the box** video idea and generate the story for now."
29
- ]
30
  )
31
 
32
  def event_stream():
33
-
34
- for stream_mode, chunk in brainstrom_graph.stream(
35
  {
36
  "idea": idea,
37
  "images": request.image_base64_list,
@@ -41,7 +31,8 @@ def brainstorming_endpoint(request: BrainstormRequest):
41
  config={"configurable": {"thread_id": request.thread_id}},
42
  stream_mode=['messages','values']
43
  ):
44
- print
45
  stored_data['brainstorming_response'] = chunk if isinstance(chunk, dict) else stored_data.get('brainstorming_response')
 
46
  if isinstance(chunk, tuple): yield chunk[0].content
 
47
  return StreamingResponse(event_stream(), media_type="text/event-stream")
 
1
 
 
2
  from fastapi import APIRouter
3
  from fastapi.responses import StreamingResponse
4
  from api.stored_data import stored_data
5
+ from src.genai.brainstroming_agent.agent import BrainstormingAgent
6
+ from api.schemas.brainstorming import BrainstormRequest
7
+ from api.backup_prompts import brainstorm_backup_prompt
8
 
9
  router=APIRouter()
10
+ agent = BrainstormingAgent()
11
+ brainstorm_graph = agent.brainstorming_graph()
 
 
 
 
 
 
12
 
13
  @router.post("/brainstorm")
14
  def brainstorming_endpoint(request: BrainstormRequest):
 
17
  if stored_data.get('refined_ideation')
18
  else [str(stored_data['final_ideation'])]
19
  if stored_data.get('final_ideation')
20
+ else [brainstorm_backup_prompt]
 
 
21
  )
22
 
23
  def event_stream():
24
+ for stream_mode, chunk in brainstorm_graph.stream(
 
25
  {
26
  "idea": idea,
27
  "images": request.image_base64_list,
 
31
  config={"configurable": {"thread_id": request.thread_id}},
32
  stream_mode=['messages','values']
33
  ):
 
34
  stored_data['brainstorming_response'] = chunk if isinstance(chunk, dict) else stored_data.get('brainstorming_response')
35
+ # if isinstance(chunk, tuple): yield f"data: {json.dumps(chunk[0].content)}"
36
  if isinstance(chunk, tuple): yield chunk[0].content
37
+
38
  return StreamingResponse(event_stream(), media_type="text/event-stream")
api/routers/context_analysis.py CHANGED
@@ -1,20 +1,14 @@
1
  import ast
2
  import json
3
- from fastapi import APIRouter, Depends
4
  from fastapi.responses import StreamingResponse
5
- from pydantic import BaseModel
6
  from api.stored_data import stored_data
7
  from src.genai.context_analysis_agent.agent import IntroductionChatbot
8
- router = APIRouter()
9
-
10
 
11
- class UserMessage(BaseModel):
12
- message: str
13
  context_analysis_graph = IntroductionChatbot()
14
 
15
-
16
- ## ---------------------- Passing in json ------------------------
17
-
18
  @router.post("/context-analysis")
19
  def context_analysis(msg: UserMessage):
20
  def event_generator():
 
1
  import ast
2
  import json
3
+ from fastapi import APIRouter
4
  from fastapi.responses import StreamingResponse
 
5
  from api.stored_data import stored_data
6
  from src.genai.context_analysis_agent.agent import IntroductionChatbot
7
+ from api.schemas.context_analysis import UserMessage
 
8
 
9
+ router = APIRouter()
 
10
  context_analysis_graph = IntroductionChatbot()
11
 
 
 
 
12
  @router.post("/context-analysis")
13
  def context_analysis(msg: UserMessage):
14
  def event_generator():
api/routers/generate_final_story.py CHANGED
@@ -1,14 +1,14 @@
1
  from fastapi import APIRouter
2
  from fastapi.responses import StreamingResponse
3
  from api.stored_data import stored_data
4
- from src.genai.brainstroming_agent.utils.utils import generate_final_story
5
 
6
  router= APIRouter()
7
 
8
  @router.post("/generate-final-story")
9
  def generate_final_story_endpoint():
10
  def event_stream():
11
- for chunk in generate_final_story(stored_data.get('brainstorming_response') or stored_data.get('business_details')):
12
  yield chunk
13
 
14
  return StreamingResponse(event_stream(), media_type="text/event-stream")
 
1
  from fastapi import APIRouter
2
  from fastapi.responses import StreamingResponse
3
  from api.stored_data import stored_data
4
+ from src.genai.brainstroming_agent.utils.utils import FinalStoryGenenrator
5
 
6
  router= APIRouter()
7
 
8
  @router.post("/generate-final-story")
9
  def generate_final_story_endpoint():
10
  def event_stream():
11
+ for chunk in FinalStoryGenenrator().generate_final_story(stored_data.get('brainstorming_response') or stored_data.get('business_details')):
12
  yield chunk
13
 
14
  return StreamingResponse(event_stream(), media_type="text/event-stream")
api/routers/generate_image.py CHANGED
@@ -1,13 +1,14 @@
1
  from fastapi import APIRouter
2
  from api.stored_data import stored_data
3
- from src.genai.brainstroming_agent.utils.utils import generate_image
 
4
  router = APIRouter()
5
 
6
  @router.post("/generate-image")
7
  def generate_image_endpoint():
8
- image = generate_image(str(stored_data.get('final_story','''I don't have any story right now. Just use the business details for now.'''))
9
  ,str(stored_data.get('business_details'))
10
- ,str(stored_data.get('refined_ideation','''I don't have any idea right now. Just use the business details for now.''')))
11
  stored_data['generated_image']=image
12
  return {
13
  'response':image
 
1
  from fastapi import APIRouter
2
  from api.stored_data import stored_data
3
+ from src.genai.brainstroming_agent.utils.utils import ImageGenerator
4
+ from api.backup_prompts import image_gen_backup_prompt
5
  router = APIRouter()
6
 
7
  @router.post("/generate-image")
8
  def generate_image_endpoint():
9
+ image = ImageGenerator().generate_image(str(stored_data.get('final_story',image_gen_backup_prompt))
10
  ,str(stored_data.get('business_details'))
11
+ ,str(stored_data.get('refined_ideation',image_gen_backup_prompt)))
12
  stored_data['generated_image']=image
13
  return {
14
  'response':image
api/routers/human_idea_refining.py CHANGED
@@ -1,15 +1,10 @@
1
  from fastapi import APIRouter
2
- from pydantic import BaseModel
3
- from typing import Optional
4
  from api.stored_data import stored_data
5
  from src.genai.human_refined_ideation.agent import human_refined_idea
 
6
  router= APIRouter()
7
  human_refine_graph = human_refined_idea()
8
 
9
- class RefineIdeationRequest(BaseModel):
10
- query: str
11
- thread_id: Optional[str]="refine_ideas_thread"
12
-
13
  @router.post("/human-idea-refining")
14
  def human_idea_refine_endpoint(request:RefineIdeationRequest):
15
  stored_data['human_ideation_interactions'].append({"role": "user", "content": request.query})
 
1
  from fastapi import APIRouter
 
 
2
  from api.stored_data import stored_data
3
  from src.genai.human_refined_ideation.agent import human_refined_idea
4
+ from api.schemas.human_idea_refining import RefineIdeationRequest
5
  router= APIRouter()
6
  human_refine_graph = human_refined_idea()
7
 
 
 
 
 
8
  @router.post("/human-idea-refining")
9
  def human_idea_refine_endpoint(request:RefineIdeationRequest):
10
  stored_data['human_ideation_interactions'].append({"role": "user", "content": request.query})
api/routers/ideation.py CHANGED
@@ -1,6 +1,5 @@
1
  import ast
2
  from fastapi import APIRouter
3
- from fastapi.responses import StreamingResponse
4
  from api.stored_data import stored_data
5
  from src.genai.ideation_agent.agent import IdeationAgent
6
  from langgraph.errors import GraphRecursionError
 
1
  import ast
2
  from fastapi import APIRouter
 
3
  from api.stored_data import stored_data
4
  from src.genai.ideation_agent.agent import IdeationAgent
5
  from langgraph.errors import GraphRecursionError
api/routers/orchestration.py CHANGED
@@ -1,7 +1,4 @@
1
-
2
- import json
3
- from fastapi import APIRouter, Depends
4
- from fastapi.responses import StreamingResponse
5
  from pydantic import BaseModel
6
  from api.stored_data import stored_data
7
  from src.genai.orchestration_agent.agent import OrchestrationAgent
 
1
+ from fastapi import APIRouter
 
 
 
2
  from pydantic import BaseModel
3
  from api.stored_data import stored_data
4
  from src.genai.orchestration_agent.agent import OrchestrationAgent
api/schemas/__init__.py ADDED
File without changes
api/schemas/__pycache__/brainstorming.cpython-312.pyc ADDED
Binary file (672 Bytes). View file
 
api/schemas/__pycache__/context_analysis.cpython-312.pyc ADDED
Binary file (456 Bytes). View file
 
api/schemas/__pycache__/human_idea_refining.cpython-312.pyc ADDED
Binary file (583 Bytes). View file
 
api/schemas/brainstorming.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ from pydantic import BaseModel
2
+ from typing import Optional
3
+
4
+ class BrainstormRequest(BaseModel):
5
+ preferred_topics: Optional[list] = []
6
+ image_base64_list: Optional[list] = []
7
+ thread_id: Optional[str]="brainstorm-thread"
api/schemas/context_analysis.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ from pydantic import BaseModel
2
+
3
+ class UserMessage(BaseModel):
4
+ message: str
api/schemas/human_idea_refining.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ from pydantic import BaseModel
2
+ from typing import Optional
3
+
4
+ class RefineIdeationRequest(BaseModel):
5
+ query: str
6
+ thread_id: Optional[str]="refine_ideas_thread"
api/schemas/ideation.py ADDED
File without changes
logs/access.log CHANGED
@@ -241,3 +241,93 @@
241
  2025-08-13 17:24:22,399 | INFO | access_logger | Response status: 200
242
  2025-08-13 17:24:37,955 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/orchestration
243
  2025-08-13 17:24:51,218 | INFO | access_logger | Response status: 200
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
241
  2025-08-13 17:24:22,399 | INFO | access_logger | Response status: 200
242
  2025-08-13 17:24:37,955 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/orchestration
243
  2025-08-13 17:24:51,218 | INFO | access_logger | Response status: 200
244
+ 2025-08-14 11:47:15,376 | INFO | access_logger | Request: GET http://127.0.0.1:8000/docs
245
+ 2025-08-14 11:47:15,398 | INFO | access_logger | Response status: 200
246
+ 2025-08-14 11:47:15,615 | INFO | access_logger | Request: GET http://127.0.0.1:8000/openapi.json
247
+ 2025-08-14 11:47:15,630 | INFO | access_logger | Response status: 200
248
+ 2025-08-14 11:47:21,828 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/ideation
249
+ 2025-08-14 11:48:12,754 | INFO | access_logger | Response status: 200
250
+ 2025-08-14 11:55:36,928 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/human-idea-refining
251
+ 2025-08-14 11:55:40,634 | INFO | access_logger | Response status: 200
252
+ 2025-08-14 11:55:56,886 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/brainstorm
253
+ 2025-08-14 11:55:56,888 | INFO | access_logger | Response status: 200
254
+ 2025-08-14 12:03:21,797 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/brainstorm
255
+ 2025-08-14 12:03:21,799 | INFO | access_logger | Response status: 200
256
+ 2025-08-14 12:14:09,786 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/brainstorm
257
+ 2025-08-14 12:14:09,801 | INFO | access_logger | Response status: 200
258
+ 2025-08-14 12:19:21,165 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/brainstorm
259
+ 2025-08-14 12:19:21,189 | INFO | access_logger | Response status: 200
260
+ 2025-08-14 12:20:57,591 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/brainstorm
261
+ 2025-08-14 12:20:57,593 | INFO | access_logger | Response status: 200
262
+ 2025-08-14 12:21:11,123 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/brainstorm
263
+ 2025-08-14 12:21:11,126 | INFO | access_logger | Response status: 200
264
+ 2025-08-14 12:22:02,907 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/brainstorm
265
+ 2025-08-14 12:22:02,909 | INFO | access_logger | Response status: 200
266
+ 2025-08-14 12:31:51,104 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/brainstorm
267
+ 2025-08-14 12:31:51,118 | INFO | access_logger | Response status: 200
268
+ 2025-08-14 12:32:26,599 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/brainstorm
269
+ 2025-08-14 12:32:26,601 | INFO | access_logger | Response status: 200
270
+ 2025-08-14 12:34:08,810 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/brainstorm
271
+ 2025-08-14 12:34:08,885 | INFO | access_logger | Response status: 200
272
+ 2025-08-14 12:35:30,940 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/brainstorm
273
+ 2025-08-14 12:35:30,957 | INFO | access_logger | Response status: 200
274
+ 2025-08-14 12:37:18,328 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/brainstorm
275
+ 2025-08-14 12:37:18,337 | INFO | access_logger | Response status: 200
276
+ 2025-08-14 12:40:59,993 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/brainstorm
277
+ 2025-08-14 12:40:59,997 | INFO | access_logger | Response status: 200
278
+ 2025-08-14 12:42:16,539 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/brainstorm
279
+ 2025-08-14 12:42:16,541 | INFO | access_logger | Response status: 200
280
+ 2025-08-14 12:42:54,879 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/brainstorm
281
+ 2025-08-14 12:42:54,902 | INFO | access_logger | Response status: 200
282
+ 2025-08-14 13:45:47,494 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/brainstorm
283
+ 2025-08-14 13:45:47,525 | INFO | access_logger | Response status: 200
284
+ 2025-08-14 13:52:59,711 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/brainstorm
285
+ 2025-08-14 13:52:59,714 | INFO | access_logger | Response status: 200
286
+ 2025-08-14 13:54:03,079 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/brainstorm
287
+ 2025-08-14 13:54:03,109 | INFO | access_logger | Response status: 200
288
+ 2025-08-14 14:00:06,222 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/brainstorm
289
+ 2025-08-14 14:00:06,323 | INFO | access_logger | Response status: 200
290
+ 2025-08-14 14:03:56,592 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/brainstorm
291
+ 2025-08-14 14:03:56,599 | INFO | access_logger | Response status: 200
292
+ 2025-08-14 14:09:05,364 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/brainstorm
293
+ 2025-08-14 14:09:05,402 | INFO | access_logger | Response status: 200
294
+ 2025-08-14 14:10:08,399 | INFO | access_logger | Request: GET http://127.0.0.1:8000/docs
295
+ 2025-08-14 14:10:08,400 | INFO | access_logger | Response status: 200
296
+ 2025-08-14 14:10:08,576 | INFO | access_logger | Request: GET http://127.0.0.1:8000/openapi.json
297
+ 2025-08-14 14:10:08,589 | INFO | access_logger | Response status: 200
298
+ 2025-08-14 14:10:19,149 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/brainstorm
299
+ 2025-08-14 14:10:19,151 | INFO | access_logger | Response status: 200
300
+ 2025-08-14 14:11:05,613 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/brainstorm
301
+ 2025-08-14 14:11:05,615 | INFO | access_logger | Response status: 200
302
+ 2025-08-14 14:24:54,914 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/brainstorm
303
+ 2025-08-14 14:24:54,986 | INFO | access_logger | Response status: 200
304
+ 2025-08-14 14:28:36,846 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/brainstorm
305
+ 2025-08-14 14:28:36,859 | INFO | access_logger | Response status: 200
306
+ 2025-08-14 14:30:58,723 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/brainstorm
307
+ 2025-08-14 14:30:58,748 | INFO | access_logger | Response status: 200
308
+ 2025-08-14 14:33:02,605 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/context-analysis
309
+ 2025-08-14 14:33:02,608 | INFO | access_logger | Response status: 200
310
+ 2025-08-14 14:34:33,342 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/context-analysis
311
+ 2025-08-14 14:34:33,358 | INFO | access_logger | Response status: 200
312
+ 2025-08-15 14:32:24,485 | INFO | access_logger | Request: GET http://127.0.0.1:8000/docs
313
+ 2025-08-15 14:32:24,512 | INFO | access_logger | Response status: 200
314
+ 2025-08-15 14:32:25,516 | INFO | access_logger | Request: GET http://127.0.0.1:8000/openapi.json
315
+ 2025-08-15 14:32:25,526 | INFO | access_logger | Response status: 200
316
+ 2025-08-15 14:32:40,662 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/orchestration
317
+ 2025-08-15 14:32:50,602 | INFO | access_logger | Response status: 200
318
+ 2025-08-15 14:33:02,418 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/show-analytics
319
+ 2025-08-15 14:33:03,160 | INFO | access_logger | Response status: 200
320
+ 2025-08-15 14:33:13,459 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/ideation
321
+ 2025-08-15 14:34:07,833 | INFO | access_logger | Response status: 200
322
+ 2025-08-15 14:36:25,214 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/human-idea-refining
323
+ 2025-08-15 14:36:28,230 | INFO | access_logger | Response status: 200
324
+ 2025-08-15 14:38:16,304 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/brainstorm
325
+ 2025-08-15 14:38:16,306 | INFO | access_logger | Response status: 200
326
+ 2025-08-15 14:39:13,673 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/generate-final-story
327
+ 2025-08-15 14:39:13,675 | INFO | access_logger | Response status: 200
328
+ 2025-08-15 14:39:29,362 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/generate-image
329
+ 2025-08-15 14:39:38,943 | INFO | access_logger | Response status: 200
330
+ 2025-08-15 15:12:47,227 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/context-analysis
331
+ 2025-08-15 15:12:47,265 | INFO | access_logger | Response status: 200
332
+ 2025-08-15 15:16:31,623 | INFO | access_logger | Request: POST http://127.0.0.1:8000/api/context-analysis
333
+ 2025-08-15 15:16:31,646 | INFO | access_logger | Response status: 200
logs/app.log CHANGED
@@ -3,3 +3,4 @@
3
  2025-08-13 11:48:54,941 | ERROR | app_logger | Error while showing analytics: retrieve_data_for_analytics() missing 1 required positional argument: 'business_details'
4
  2025-08-13 13:53:24,416 | INFO | app_logger | Showing Analytics of the influencers after context analysis.
5
  2025-08-13 17:24:22,398 | INFO | app_logger | Showing Analytics of the influencers after context analysis.
 
 
3
  2025-08-13 11:48:54,941 | ERROR | app_logger | Error while showing analytics: retrieve_data_for_analytics() missing 1 required positional argument: 'business_details'
4
  2025-08-13 13:53:24,416 | INFO | app_logger | Showing Analytics of the influencers after context analysis.
5
  2025-08-13 17:24:22,398 | INFO | app_logger | Showing Analytics of the influencers after context analysis.
6
+ 2025-08-15 14:33:03,159 | INFO | app_logger | Showing Analytics of the influencers after context analysis.
requirements.txt CHANGED
@@ -1,5 +1,5 @@
1
 
2
- langgraph==0.6.4
3
  langsmith
4
  langchain_groq
5
  pydantic
 
1
 
2
+ langgraph
3
  langsmith
4
  langchain_groq
5
  pydantic
src/genai/brainstroming_agent/__pycache__/agent.cpython-312.pyc CHANGED
Binary files a/src/genai/brainstroming_agent/__pycache__/agent.cpython-312.pyc and b/src/genai/brainstroming_agent/__pycache__/agent.cpython-312.pyc differ
 
src/genai/brainstroming_agent/agent.py CHANGED
@@ -1,23 +1,24 @@
1
  from langgraph.graph import StateGraph, START, END
2
  from .utils.state import State
3
- from .utils.nodes import retrieve, generate_story, generate_brainstroming,caption_image
4
  from langgraph.checkpoint.memory import MemorySaver
5
 
6
  memory = MemorySaver()
 
 
 
7
 
 
 
 
 
 
 
8
 
9
- def brainstroming_graph():
10
- builder = StateGraph(State)
11
- builder.add_node(caption_image)
12
- builder.add_node(retrieve)
13
- builder.add_node(generate_story)
14
- builder.add_node(generate_brainstroming)
15
 
16
-
17
- # Normal edges
18
- builder.add_edge(START, "caption_image")
19
- builder.add_edge("caption_image", "retrieve")
20
- builder.add_edge("retrieve", "generate_story")
21
- builder.add_edge("generate_story", "generate_brainstroming")
22
- builder.add_edge("generate_brainstroming", END)
23
- return builder.compile(checkpointer=memory)
 
1
  from langgraph.graph import StateGraph, START, END
2
  from .utils.state import State
3
+ from .utils.nodes import Retriever , ImageCaptioner , StoryGenerator, BrainstromTopicGenerator
4
  from langgraph.checkpoint.memory import MemorySaver
5
 
6
  memory = MemorySaver()
7
+ class BrainstormingAgent:
8
+ def __init__(self):
9
+ self.memory = MemorySaver()
10
 
11
+ def brainstorming_graph(self):
12
+ builder = StateGraph(State)
13
+ builder.add_node("caption_image",ImageCaptioner().run)
14
+ builder.add_node("retrieve",Retriever().run)
15
+ builder.add_node("generate_story",StoryGenerator().run)
16
+ builder.add_node("generate_brainstroming",BrainstromTopicGenerator().run)
17
 
 
 
 
 
 
 
18
 
19
+ builder.add_edge(START, "caption_image")
20
+ builder.add_edge("caption_image", "retrieve")
21
+ builder.add_edge("retrieve", "generate_story")
22
+ builder.add_edge("generate_story", "generate_brainstroming")
23
+ builder.add_edge("generate_brainstroming", END)
24
+ return builder.compile(checkpointer=self.memory)
 
 
src/genai/brainstroming_agent/utils/__pycache__/nodes.cpython-312.pyc CHANGED
Binary files a/src/genai/brainstroming_agent/utils/__pycache__/nodes.cpython-312.pyc and b/src/genai/brainstroming_agent/utils/__pycache__/nodes.cpython-312.pyc differ
 
src/genai/brainstroming_agent/utils/__pycache__/prompts.cpython-312.pyc CHANGED
Binary files a/src/genai/brainstroming_agent/utils/__pycache__/prompts.cpython-312.pyc and b/src/genai/brainstroming_agent/utils/__pycache__/prompts.cpython-312.pyc differ
 
src/genai/brainstroming_agent/utils/__pycache__/tools.cpython-312.pyc CHANGED
Binary files a/src/genai/brainstroming_agent/utils/__pycache__/tools.cpython-312.pyc and b/src/genai/brainstroming_agent/utils/__pycache__/tools.cpython-312.pyc differ
 
src/genai/brainstroming_agent/utils/__pycache__/utils.cpython-312.pyc CHANGED
Binary files a/src/genai/brainstroming_agent/utils/__pycache__/utils.cpython-312.pyc and b/src/genai/brainstroming_agent/utils/__pycache__/utils.cpython-312.pyc differ
 
src/genai/brainstroming_agent/utils/nodes.py CHANGED
@@ -1,118 +1,112 @@
1
- import pandas as pd
2
- import ast
3
- from .state import State
4
- from .tools import retrieve_tool
5
- from langchain_core.messages import SystemMessage ,HumanMessage, ToolMessage, FunctionMessage
6
- from src.genai.utils.models_loader import llm , llm_gpt
7
- from src.genai.utils.data_loader import load_influencer_data
8
- from groq import Groq
9
  import os
10
- from .prompts import image_captioning_prompt , initial_story_prompt , refined_story_prompt , brainstroming_prompt
11
- from langgraph.prebuilt import create_react_agent
 
12
  from .state import BrainstromTopicFormatter
 
 
 
13
 
14
 
15
-
16
- def caption_image(state: State) -> State:
17
- if len(state.images)>0:
18
- if state.images[-1]!=None:
19
- print('Captioning image')
20
- client = Groq(api_key=os.environ.get('GROQ_API_KEY'))
21
-
22
- chat_completion = client.chat.completions.create(
23
- messages=[
24
- {
25
- "role": "user",
26
- "content": [
27
- {"type": "text", "text": image_captioning_prompt(state)},
28
- {
29
- "type": "image_url",
30
- "image_url": {
31
- "url": f"data:image/jpeg;base64,{state.images[-1]}",
32
- },
33
- },
34
- ],
35
- }
36
- ],
37
- model="meta-llama/llama-4-scout-17b-16e-instruct",
38
- max_completion_tokens=50,
39
- temperature = 1
40
- )
41
- response=chat_completion.choices[0].message.content
42
- state.image_captions.append(response)
43
- return state
44
-
45
- else:
46
- state.images.append(None)
47
- state.image_captions.append(None)
48
- return state
49
-
50
-
51
-
52
- def retrieve(state: State) -> State:
53
- print('Moving to retrieval process')
54
- retrievals=[]
55
- query_prompt = 'Represent this sentence for searching relevant passages: '
56
- if len(state.latest_preferred_topics)==0:
57
- for idea in state.idea:
58
- print('The idea for retrieval:', idea)
59
- result = retrieve_tool(idea+query_prompt)
60
- retrievals.append(result)
61
- print('Retrieval process completed......')
62
- state.retrievals.append(retrievals)
63
-
64
- if len (state.latest_preferred_topics)>0:
65
- print('The preferred_topics are:',state.latest_preferred_topics)
66
- state.preferred_topics.append(state.latest_preferred_topics)
67
- for idea in state.preferred_topics[-1]:
68
- result = retrieve_tool(idea+query_prompt)
69
- retrievals.append(result)
70
- print('Retrieval process completed for preferred_topics......')
71
- state.latest_preferred_topics=[]
72
- state.retrievals.append(retrievals)
73
- return state
74
-
75
- def generate_story(state:State)-> State:
76
- react_agent=create_react_agent(
77
- model=llm_gpt,
78
- tools=[]
79
 
80
- )
81
- if len(state.preferred_topics)==0:
82
- template = initial_story_prompt(state)
83
- else:
84
- template = refined_story_prompt(state)
85
-
86
-
87
- messages = [SystemMessage(content=template),
88
- HumanMessage(content=f'''The idea of the video is:\n{state.idea}\n'''),
89
- FunctionMessage(name='generate_story_function',content=f'''The business details is:\n{state.business_details}\n
90
- The retrieved data of influencers is:\n{state.retrievals[-1]}\n
91
- The information from the image is:\n{state.image_captions[-1]} ''')]
92
 
93
- print('Messages:',messages)
94
-
95
- response = react_agent.invoke({'messages':messages})
96
- response = response['messages'][-1].content
97
- print('The genrated story: ', response)
98
- state.stories.append(response)
99
- return state
100
-
101
-
102
 
103
- def generate_brainstroming(state:State)-> State:
104
-
105
- template= brainstroming_prompt(state)
106
-
107
- messages = [SystemMessage(content=template),
108
- HumanMessage(content=f'''Here is the story to you for brainstorming:\n{state.stories[-1]}'''),
109
- FunctionMessage(content=f'''The details of business is:\n{state.business_details}\n''', name="brainstorm_tool")]
110
- print('Message for brainstorming:',messages)
111
- response = llm_gpt.with_structured_output(BrainstromTopicFormatter).invoke(messages)
112
- response = response.model_dump()
113
- state.brainstroming_topics.append(response)
114
- print('The brainstroming topics are:',state.brainstroming_topics)
115
- return state
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
116
 
117
 
118
 
 
 
 
 
 
 
 
 
 
1
  import os
2
+ from groq import Groq
3
+ from .state import State
4
+ from .tools import Retrieval
5
  from .state import BrainstromTopicFormatter
6
+ from src.genai.utils.models_loader import llm , llm_gpt , captioning_model
7
+ from langchain_core.messages import SystemMessage ,HumanMessage, FunctionMessage
8
+ from .prompts import image_captioning_prompt , initial_story_prompt , refined_story_prompt , brainstroming_prompt
9
 
10
 
11
class ImageCaptioner:
    """Graph node: caption the most recent uploaded image with a vision LLM."""

    def __init__(self):
        # Model id comes from the central model loader so it is configured once.
        self.captioning_model = captioning_model
        self.client = Groq(api_key=os.environ.get('GROQ_API_KEY'))

    def run(self, state: State) -> State:
        """Append a caption for the latest image (or None placeholders) to the state.

        Bug fix: previously, when ``state.images`` was empty, execution fell off
        the end and returned None, which breaks downstream graph nodes that
        expect a State. Now the state is returned on every path.
        """
        if len(state.images) > 0:
            if state.images[-1] is not None:
                print('Captioning image')

                chat_completion = self.client.chat.completions.create(
                    messages=[
                        {
                            "role": "user",
                            "content": [
                                {"type": "text", "text": image_captioning_prompt(state)},
                                {
                                    "type": "image_url",
                                    "image_url": {
                                        "url": f"data:image/jpeg;base64,{state.images[-1]}",
                                    },
                                },
                            ],
                        }
                    ],
                    model=self.captioning_model,
                    max_completion_tokens=50,
                    temperature=1,
                )
                response = chat_completion.choices[0].message.content
                state.image_captions.append(response)
                return state
            else:
                # Keep the images/captions lists aligned with None placeholders.
                state.images.append(None)
                state.image_captions.append(None)
                return state
        # No image ever supplied: leave state untouched but still return it.
        return state
 
 
 
 
 
 
 
 
48
 
 
 
 
 
 
 
 
 
 
49
 
50
class Retriever:
    """Graph node: semantic-search influencer data for the current ideas/topics."""

    # BGE-style retrieval instruction appended to each query.
    # NOTE(review): this is appended AFTER the idea text; such instructions are
    # usually a prefix -- confirm against the embedding model's documentation.
    QUERY_PROMPT = 'Represent this sentence for searching relevant passages: '

    def run(self, state: State) -> State:
        """Retrieve influencer data per idea (or per preferred topic) and store it.

        Bug fix: the previous instance-level ``self.retrievals`` list persisted
        across graph runs (the node instance is created once at graph-build
        time), so results from earlier turns accumulated into every new turn.
        The list is now local to each invocation, matching the pre-OOP behavior.
        """
        retrievals = []
        if len(state.latest_preferred_topics) == 0:
            # No user feedback yet: retrieve for the raw ideas.
            for idea in state.idea:
                result = Retrieval(idea + self.QUERY_PROMPT).influencers_data()
                retrievals.append(result)
            state.retrievals.append(retrievals)

        if len(state.latest_preferred_topics) > 0:
            # User picked topics: retrieve for those and clear the pending list.
            state.preferred_topics.append(state.latest_preferred_topics)
            for idea in state.preferred_topics[-1]:
                result = Retrieval(idea + self.QUERY_PROMPT).influencers_data()
                retrievals.append(result)
            state.latest_preferred_topics = []
            state.retrievals.append(retrievals)
        return state
70
+
71
class StoryGenerator:
    """Graph node: draft a video story from the idea, retrievals and image caption."""

    def __init__(self):
        self.llm = llm_gpt

    def run(self, state: State) -> State:
        """Generate a story with the LLM and append it to ``state.stories``."""
        # Refine once the user has expressed topic preferences; otherwise start fresh.
        template = (
            refined_story_prompt(state)
            if len(state.preferred_topics) > 0
            else initial_story_prompt(state)
        )

        messages = [
            SystemMessage(content=template),
            HumanMessage(content=f'''The idea of the video is:\n{state.idea}\n'''),
            FunctionMessage(
                name='generate_story_function',
                content=f'''The business details is:\n{state.business_details}\n
The retrieved data of influencers is:\n{state.retrievals[-1]}\n
The information from the image is:\n{state.image_captions[-1]} ''',
            ),
        ]

        result = self.llm.invoke(messages)
        state.stories.append(result.content)
        return state
92
+
93
+
94
class BrainstromTopicGenerator:
    """Graph node: derive structured brainstorming topics from the latest story."""

    def __init__(self):
        self.llm = llm_gpt

    def run(self, state: State) -> State:
        """Append a dict of brainstorming topics to ``state.brainstroming_topics``."""
        messages = [
            SystemMessage(content=brainstroming_prompt(state)),
            HumanMessage(content=f'''Here is the story to you for brainstorming:\n{state.stories[-1]}'''),
            FunctionMessage(content=f'''The details of business is:\n{state.business_details}\n''', name="brainstorm_tool"),
        ]
        print('Message for brainstorming:', messages)

        # Force the LLM output into the BrainstromTopicFormatter schema.
        structured_llm = self.llm.with_structured_output(BrainstromTopicFormatter)
        topics = structured_llm.invoke(messages).model_dump()

        state.brainstroming_topics.append(topics)
        print('The brainstroming topics are:', state.brainstroming_topics)
        return state
110
 
111
 
112
 
src/genai/brainstroming_agent/utils/prompts.py CHANGED
@@ -4,7 +4,7 @@ You are a professional prompt engineer for image generation models.
4
 
5
  Your task is to read a full scene-by-scene video story and generate a **single, vivid, high-quality visual prompt** that captures the **overall essence and identity** of the video. The goal is to produce an image that would represent the story's core themes, mood, and style as a **display image or visual thumbnail**.
6
  The scene-by-scene video story will be passed to you by the human.
7
- 🎯 Instructions:
8
  - Summarize the key visual themes, characters, mood, setting, and action from across the entire story.
9
  - Focus on the most visually iconic or emotionally powerful moment.
10
  - Avoid generic phrases. Be specific about details like setting, time of day, lighting, colors, characters, and mood.
 
4
 
5
  Your task is to read a full scene-by-scene video story and generate a **single, vivid, high-quality visual prompt** that captures the **overall essence and identity** of the video. The goal is to produce an image that would represent the story's core themes, mood, and style as a **display image or visual thumbnail**.
6
  The scene-by-scene video story will be passed to you by the human.
7
+ Instructions:
8
  - Summarize the key visual themes, characters, mood, setting, and action from across the entire story.
9
  - Focus on the most visually iconic or emotionally powerful moment.
10
  - Avoid generic phrases. Be specific about details like setting, time of day, lighting, colors, characters, and mood.
src/genai/brainstroming_agent/utils/tools.py CHANGED
@@ -8,46 +8,44 @@ import tiktoken
8
  from src.genai.utils.load_embeddings import caption_index , caption_df
9
  from src.genai.utils.utils import clean_text
10
 
11
- def retrieve_tool(video_topic):
12
- '''
13
- Always invoke this tool.
14
- Retrieve influencer's data by semantic search of **video topic**.
15
- '''
16
- query_embedding = np.array(embedding_model.embed_query(str(video_topic))).reshape(1, -1).astype('float32')
17
- faiss.normalize_L2(query_embedding)
18
-
19
- top_k = len(caption_df)
20
- distances, indices = caption_index.search(query_embedding, top_k)
21
-
22
- similarity_threshold = 0.35
23
- selected = [(idx, sim) for idx, sim in zip(indices[0], distances[0]) if sim >= similarity_threshold]
24
-
25
- if not selected:
26
- return "No influencers found."
27
-
28
- # === Format results ===
29
- outer_list = []
30
- for rank, (idx, sim) in enumerate(selected, 1):
31
- row = caption_df.iloc[idx]
32
- res = {
33
- 'rank': rank,
34
- 'username': row['username'],
35
- 'visible_text_or_brandings': row['visible_texts_or_brandings'],
36
- 'likesCount': row['likesCount'],
37
- 'commentCount': row['commentCount'],
38
- 'product_or_service_details': row['product_or_service_details'],
39
- }
40
-
41
- inner_list = [
42
- f"[{res['rank']}]. The influencer name is: **{res['username']}** Likes: **{res['likesCount']}**, Comments: **{res['commentCount']}**",
43
- f"The branding or promotion done is:\n{res['visible_text_or_brandings']}",
44
- f"The details of product or service is:\n{res['product_or_service_details']}"
45
- ]
46
- outer_list.append(inner_list)
47
-
48
- cleaned_response = clean_text(str(outer_list))
49
- encoding = tiktoken.encoding_for_model('gpt-4o-mini')
50
- tokens = encoding.encode(cleaned_response)
51
- trimmed_response = tokens[:1000]
52
- return encoding.decode(trimmed_response)
53
-
 
8
  from src.genai.utils.load_embeddings import caption_index , caption_df
9
  from src.genai.utils.utils import clean_text
10
 
11
class Retrieval:
    """Semantic search over the influencer caption index for a video topic."""

    def __init__(self, video_topic):
        self.video_topic = video_topic
        # Embed the query once and L2-normalise so inner product == cosine similarity.
        embedded = embedding_model.embed_query(str(self.video_topic))
        self.query_embedding = np.array(embedded).reshape(1, -1).astype('float32')
        faiss.normalize_L2(self.query_embedding)

    def influencers_data(self):
        """Return a token-trimmed textual summary of matching influencers."""
        # Search the full index; filtering by similarity happens below.
        distances, indices = caption_index.search(self.query_embedding, len(caption_df))

        threshold = 0.35  # minimum cosine similarity to count as a match
        hits = [(idx, sim) for idx, sim in zip(indices[0], distances[0]) if sim >= threshold]
        if not hits:
            return "No influencers found."

        formatted = []
        for rank, (idx, sim) in enumerate(hits, 1):
            row = caption_df.iloc[idx]
            formatted.append([
                f"[{rank}]. The influencer name is: **{row['username']}** — Likes: **{row['likesCount']}**, Comments: **{row['commentCount']}**",
                f"The branding or promotion done is:\n{row['visible_texts_or_brandings']}",
                f"The details of product or service is:\n{row['product_or_service_details']}",
            ])

        # Trim to at most 1000 tokens so the context fits the downstream LLM call.
        encoding = tiktoken.encoding_for_model('gpt-4o-mini')
        tokens = encoding.encode(clean_text(str(formatted)))
        return encoding.decode(tokens[:1000])
51
+
 
 
src/genai/brainstroming_agent/utils/utils.py CHANGED
@@ -1,6 +1,6 @@
1
 
2
- from langchain_core.messages import SystemMessage, ToolMessage, HumanMessage, FunctionMessage
3
- from .tools import retrieve_tool
4
  import base64
5
  from PIL import Image
6
  from io import BytesIO
@@ -9,129 +9,63 @@ from huggingface_hub import InferenceClient
9
  from .prompts import story_to_prompt , final_story_prompt
10
  import os
11
  from langgraph.prebuilt import create_react_agent
12
- import pandas as pd
13
- from datasets import load_dataset
14
- from src.genai.utils.models_loader import llm_gpt
15
 
 
 
 
 
16
 
 
 
 
 
 
 
 
17
 
18
- def generate_final_story(final_state):
19
- if 'preferred_topics' in final_state:
20
- if len(final_state['preferred_topics'])>0:
21
- template = final_story_prompt(final_state)
22
- messages = [SystemMessage(content=template),
23
- HumanMessage(content=f'''The idea of the video is:\n{final_state['idea']}\n '''),
24
- FunctionMessage(content=f'''The business details is:\n{final_state['business_details']}\nThe data of influencers is:\n{final_state['retrievals'][-1]}''',name='final_story_tool')]
25
- print('The message of final story:',messages)
26
 
27
- react_agent=create_react_agent(
28
- model=llm_gpt,
29
- tools=[])
30
-
31
- for message_chunk , metadata in react_agent.stream({'messages':messages},stream_mode='messages'):
32
- yield message_chunk.content
33
 
 
 
 
34
  else:
35
- for chunk in final_state['stories'][-1]:
36
- yield chunk
37
- else:
38
- template = final_story_prompt(final_state)
39
- influencers_data = retrieve_tool(final_state)
40
- messages = [SystemMessage(content=template),
41
- FunctionMessage(content=f'''The business details is:\n{str(final_state)}\nThe data of influencers is:\n{influencers_data}''',name='final_story_tool')]
42
- react_agent=create_react_agent(
43
- model=llm_gpt,
44
- tools=[])
45
-
46
- for message_chunk , metadata in react_agent.stream({'messages':messages},stream_mode='messages'):
47
- yield message_chunk.content
48
-
49
-
50
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
51
 
52
  def encode_image_to_base64(uploaded_file: UploadFile) -> str:
53
  return base64.b64encode(uploaded_file.file.read()).decode("utf-8")
54
-
55
 
56
- # Convert base64 string to PIL image (optional for LangGraph processing)
57
  def process_image(base64_str: str) -> Image.Image:
58
  image_data = base64.b64decode(base64_str)
59
  return Image.open(BytesIO(image_data))
60
-
61
-
62
- def generate_prompt(final_story,business_details,refined_ideation):
63
- print('************Entering prompt generator****************')
64
- messages = [SystemMessage(content=story_to_prompt()),
65
- HumanMessage(content=f'''The scene-by-scene video story is {final_story}'''),
66
- FunctionMessage(content=f'''The business details is:\n{business_details}\nThe idea is{refined_ideation}''',name='prompt_generation_id')
67
- ]
68
-
69
- prompt = llm_gpt.invoke(messages)
70
- print('The prompt is:',prompt)
71
- return prompt.content
72
-
73
- def generate_image(final_story, business_details, refined_ideation):
74
- prompt = generate_prompt(final_story, business_details, refined_ideation)
75
- print('************Finished prompt generator****************')
76
-
77
- client = InferenceClient(
78
- provider="hf-inference",
79
- api_key=os.environ.get('HUGGINGFACEHUB_ACCESS_TOKEN'),
80
- )
81
-
82
- print('************Finished calling generator****************')
83
-
84
-
85
- # output is a PIL.Image object
86
- image = client.text_to_image(
87
- prompt,
88
- model="black-forest-labs/FLUX.1-schnell",
89
- )
90
- print('*****************Image Created*******************')
91
-
92
- # Convert image to BytesIO buffer
93
- buffered = BytesIO()
94
- image.save(buffered, format="PNG") # you can also use "JPEG" if preferred
95
- buffered.seek(0)
96
-
97
- # Encode to base64 string
98
- img_base64 = base64.b64encode(buffered.read()).decode("utf-8")
99
- print('*****************Image Encoded to Base64*******************')
100
-
101
- return img_base64
102
-
103
-
104
- def save_to_db(business_details):
105
- dataset = load_dataset("subashdvorak/tiktok-agentic-story")['train']
106
- # dataset = load_influencer_data()
107
- df = pd.DataFrame(dataset)
108
-
109
- # 2. Flatten all business detail values to a set of lowercase strings
110
- all_values = set()
111
- for v in business_details.values():
112
- if isinstance(v, str):
113
- all_values.add(v.lower())
114
- elif isinstance(v, list):
115
- all_values.update(map(str.lower, map(str, v)))
116
-
117
- # 3. Match rows where ANY column contains ANY of the values
118
- def row_matches(row):
119
- return any(
120
- str(cell).lower().find(val) != -1
121
- for cell in row
122
- for val in all_values
123
- )
124
-
125
- # 4. Apply row-wise matching
126
- matched_df = df[df.apply(row_matches, axis=1)]
127
- matched_df.to_csv('extracted_data.csv')
128
-
129
-
130
-
131
-
132
-
133
-
134
-
135
-
136
-
137
-
 
1
 
2
+ from langchain_core.messages import SystemMessage,HumanMessage, FunctionMessage
3
+ from .tools import Retrieval
4
  import base64
5
  from PIL import Image
6
  from io import BytesIO
 
9
  from .prompts import story_to_prompt , final_story_prompt
10
  import os
11
  from langgraph.prebuilt import create_react_agent
12
+ from src.genai.utils.models_loader import llm_gpt, image_generation_model
 
 
13
 
14
class FinalStoryGenenrator:
    """Streams the final story text, reusing a prior draft when one exists."""
    # NOTE(review): class name is misspelled ("Genenrator"); kept unchanged
    # because callers import it under this exact name.

    def __init__(self):
        self.llm = llm_gpt
        self.agent = create_react_agent(model=llm_gpt, tools=[])

    def _stream(self, messages):
        # Stream token chunks from the react agent.
        for message_chunk, _metadata in self.agent.stream({'messages': messages}, stream_mode='messages'):
            yield message_chunk.content

    def generate_final_story(self, final_state):
        """Yield the final story chunk by chunk (generator)."""
        if 'preferred_topics' not in final_state:
            # No brainstorming happened: retrieve influencers and build from scratch.
            influencers_data = Retrieval(str(final_state)).influencers_data()
            messages = [
                SystemMessage(content=final_story_prompt(final_state)),
                FunctionMessage(content=f'''The business details is:\n{str(final_state)}\nThe data of influencers is:\n{influencers_data}''', name='final_story_tool'),
            ]
            yield from self._stream(messages)
        elif len(final_state['preferred_topics']) > 0:
            # User refined topics: regenerate with idea + business + retrievals.
            messages = [
                SystemMessage(content=final_story_prompt(final_state)),
                HumanMessage(content=f'''The idea of the video is:\n{final_state['idea']}\n '''),
                FunctionMessage(content=f'''The business details is:\n{final_state['business_details']}\nThe data of influencers is:\n{final_state['retrievals'][-1]}''', name='final_story_tool'),
            ]
            yield from self._stream(messages)
        else:
            # User accepted an existing draft; replay it as-is.
            yield from final_state['stories'][-1]
 
 
 
 
 
 
 
 
42
 
43
class ImageGenerator:
    """Turns a finished story into a display image, returned as base64 PNG."""

    def __init__(self):
        self.llm = llm_gpt
        self.image_generation_model = image_generation_model

    def generate_prompt(self, final_story, business_details, refined_ideation):
        """Ask the LLM for a single image-generation prompt for the story."""
        prompt_messages = [
            SystemMessage(content=story_to_prompt()),
            HumanMessage(content=f'''The scene-by-scene video story is {final_story}'''),
            FunctionMessage(content=f'''The business details is:\n{business_details}\nThe idea is{refined_ideation}''', name='prompt_generation_id'),
        ]
        return self.llm.invoke(prompt_messages).content

    def generate_image(self, final_story, business_details, refined_ideation):
        """Render the generated prompt to an image and return it base64-encoded."""
        image_prompt = self.generate_prompt(final_story, business_details, refined_ideation)

        client = InferenceClient(provider="hf-inference", api_key=os.environ.get('HUGGINGFACEHUB_ACCESS_TOKEN'))
        picture = client.text_to_image(image_prompt, model=self.image_generation_model)

        # Serialize the PIL image to PNG bytes, then base64-encode for transport.
        buffer = BytesIO()
        picture.save(buffer, format="PNG")
        buffer.seek(0)
        return base64.b64encode(buffer.read()).decode("utf-8")
65
 
66
def encode_image_to_base64(uploaded_file: UploadFile) -> str:
    """Read an uploaded file's bytes and return them as a base64 string."""
    raw_bytes = uploaded_file.file.read()
    return base64.b64encode(raw_bytes).decode("utf-8")
 
68
 
 
69
def process_image(base64_str: str) -> Image.Image:
    """Decode a base64 string back into a PIL image."""
    decoded = base64.b64decode(base64_str)
    return Image.open(BytesIO(decoded))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
src/genai/context_analysis_agent/__pycache__/agent.cpython-312.pyc CHANGED
Binary files a/src/genai/context_analysis_agent/__pycache__/agent.cpython-312.pyc and b/src/genai/context_analysis_agent/__pycache__/agent.cpython-312.pyc differ
 
src/genai/context_analysis_agent/agent.py CHANGED
@@ -2,22 +2,23 @@ from langchain_groq import ChatGroq
2
  from langgraph.graph import StateGraph, MessagesState, START, END
3
  from langgraph.checkpoint.memory import MemorySaver
4
  from .utils.state import State
5
- from .utils.nodes import introduction_node, extract_business_details
6
- from src.genai.utils.models_loader import llm
 
7
 
8
  business_state = State()
9
 
10
  class IntroductionChatbot:
11
  def __init__(self):
12
  self.memory = MemorySaver()
13
- self.llm = llm
14
  self.workflow = self._initialize_workflow()
15
  self.interact_agent = self.workflow.compile(checkpointer=self.memory)
16
  self.messages = []
17
 
18
  def _initialize_workflow(self):
19
  workflow = StateGraph(MessagesState)
20
- workflow.add_node("chatbot", lambda state: introduction_node(state, self.llm))
21
  workflow.add_edge(START, "chatbot")
22
  workflow.add_edge("chatbot", END)
23
  return workflow
@@ -37,7 +38,7 @@ class IntroductionChatbot:
37
  return "Thanks for providing all your required business details" in latest_response
38
 
39
  def extract_details(self):
40
- response = extract_business_details(business_state.interactions)
41
  print('Extracted details:', response)
42
  return response
43
 
 
2
  from langgraph.graph import StateGraph, MessagesState, START, END
3
  from langgraph.checkpoint.memory import MemorySaver
4
  from .utils.state import State
5
+ from .utils.nodes import IntroductionNode
6
+ from .utils.utils import DetailsExtractor
7
+ from src.genai.utils.models_loader import llm_gpt
8
 
9
  business_state = State()
10
 
11
  class IntroductionChatbot:
12
  def __init__(self):
13
  self.memory = MemorySaver()
14
+ self.llm = llm_gpt
15
  self.workflow = self._initialize_workflow()
16
  self.interact_agent = self.workflow.compile(checkpointer=self.memory)
17
  self.messages = []
18
 
19
  def _initialize_workflow(self):
20
  workflow = StateGraph(MessagesState)
21
+ workflow.add_node("chatbot", lambda state: IntroductionNode().run(state, self.llm))
22
  workflow.add_edge(START, "chatbot")
23
  workflow.add_edge("chatbot", END)
24
  return workflow
 
38
  return "Thanks for providing all your required business details" in latest_response
39
 
40
  def extract_details(self):
41
+ response = DetailsExtractor().run(business_state.interactions)
42
  print('Extracted details:', response)
43
  return response
44
 
src/genai/context_analysis_agent/utils/__pycache__/nodes.cpython-312.pyc CHANGED
Binary files a/src/genai/context_analysis_agent/utils/__pycache__/nodes.cpython-312.pyc and b/src/genai/context_analysis_agent/utils/__pycache__/nodes.cpython-312.pyc differ
 
src/genai/context_analysis_agent/utils/__pycache__/utils.cpython-312.pyc CHANGED
Binary files a/src/genai/context_analysis_agent/utils/__pycache__/utils.cpython-312.pyc and b/src/genai/context_analysis_agent/utils/__pycache__/utils.cpython-312.pyc differ
 
src/genai/context_analysis_agent/utils/nodes.py CHANGED
@@ -1,21 +1,19 @@
 
1
  from langchain_core.messages import SystemMessage
2
- from .prompts import introduction_prompt, details_extract_prompt
3
- from .state import State , DetailsFormatter
4
- from src.genai.utils.models_loader import llm
 
 
 
 
 
 
 
 
 
5
 
6
- # This node generates chatbot responses using the LLM
7
- def introduction_node(state, llm):
8
- template = introduction_prompt
9
- messages = [SystemMessage(content=template)] + state["messages"]
10
- response = llm.invoke(messages)
11
- return {"messages": [response]}
12
 
13
- # This function extracts structured business details
14
- def extract_business_details(interactions):
15
- template = details_extract_prompt(interactions)
16
- messages = [SystemMessage(content=template)]
17
- response=llm.with_structured_output(DetailsFormatter).invoke(messages)
18
- return response
19
 
20
 
21
 
 
1
+ from .state import DetailsFormatter
2
  from langchain_core.messages import SystemMessage
3
+ from src.genai.utils.models_loader import llm_gpt
4
+ from .prompts import introduction_prompt
5
+
6
class IntroductionNode:
    """Graph node that produces the chatbot's next message."""

    def __init__(self):
        # Default model, used when the caller does not supply one to run().
        self.llm = llm_gpt

    def run(self, state, llm=None):
        """Invoke the LLM on the system prompt plus the conversation history.

        Improvement: ``llm`` is now optional. Previously ``self.llm`` was
        assigned in ``__init__`` but never read, and ``run`` always required
        an external model; existing callers passing ``llm`` are unaffected.
        """
        model = llm if llm is not None else self.llm
        messages = [SystemMessage(content=introduction_prompt)] + state["messages"]
        response = model.invoke(messages)
        return {"messages": [response]}
15
 
 
 
 
 
 
 
16
 
 
 
 
 
 
 
17
 
18
 
19
 
src/genai/context_analysis_agent/utils/utils.py CHANGED
@@ -1,29 +1,47 @@
1
- # from utils.data_loader import load_dataset
2
  import pandas as pd
3
- from utils.data_loader import load_influencer_data
4
-
5
- def save_to_db(business_details):
6
- # dataset = load_dataset("subashdvorak/tiktok-agentic-story")['train']
7
- dataset = load_influencer_data()
8
- df = pd.DataFrame(dataset)
9
-
10
- # 2. Flatten all business detail values to a set of lowercase strings
11
- all_values = set()
12
- for v in business_details.values():
13
- if isinstance(v, str):
14
- all_values.add(v.lower())
15
- elif isinstance(v, list):
16
- all_values.update(map(str.lower, map(str, v)))
17
-
18
- # 3. Match rows where ANY column contains ANY of the values
19
- def row_matches(row):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
  return any(
21
  str(cell).lower().find(val) != -1
22
  for cell in row
23
  for val in all_values
24
  )
25
 
26
- # 4. Apply row-wise matching
27
- matched_df = df[df.apply(row_matches, axis=1)]
28
- matched_df.to_csv('extracted_data.csv')
29
- print('Dataset updated according to business')
 
 
 
 
 
 
1
  import pandas as pd
2
+ from src.genai.utils.load_embeddings import caption_df
3
+ from src.genai.utils.models_loader import llm_gpt
4
+ from .prompts import details_extract_prompt
5
+ from langchain_core.messages import SystemMessage
6
+ from .state import DetailsFormatter
7
+
8
class DetailsExtractor:
    """Extract structured business details from chat interactions via the LLM."""

    def __init__(self):
        self.llm = llm_gpt

    def run(self, interactions):
        """Return a DetailsFormatter parsed from the interaction history.

        Bug fix: ``run`` previously omitted ``self``, so the existing call
        site ``DetailsExtractor().run(interactions)`` raised
        ``TypeError: run() takes 1 positional argument but 2 were given``.
        Also uses ``self.llm`` instead of the module-level global.
        """
        template = details_extract_prompt(interactions)
        messages = [SystemMessage(content=template)]
        response = self.llm.with_structured_output(DetailsFormatter).invoke(messages)
        return response
17
+
18
class SaveToDB:
    """Filter the influencer dataframe by business details and persist matches to CSV."""

    def __init__(self, caption_df):
        # Embeddings are bulky and irrelevant for CSV export; drop them if present.
        self.df = caption_df.drop(columns=['embeddings'], errors='ignore')

    def _prepare_values(self, business_details):
        """Extract lowercase string values from the business_details dict."""
        all_values = set()
        for v in business_details.values():
            if isinstance(v, str):
                all_values.add(v.lower())
            elif isinstance(v, list):
                all_values.update(str(item).lower() for item in v)
        return all_values

    def _row_matches(self, row, all_values):
        """Check whether any search value occurs (substring match) in any cell."""
        # Lowercase each cell once instead of per (cell, value) pair,
        # and use the `in` operator instead of `.find(...) != -1`.
        cells = [str(cell).lower() for cell in row]
        return any(val in cell for cell in cells for val in all_values)

    def save_to_csv(self, business_details, output_file='extracted_data.csv'):
        """Filter dataframe rows based on business_details and save to CSV."""
        all_values = self._prepare_values(business_details)
        matched_df = self.df[self.df.apply(self._row_matches, axis=1, args=(all_values,))]
        matched_df.to_csv(output_file, index=False)
45
+
46
+
47
+
src/genai/ideation_agent/utils/__pycache__/prompts.cpython-312.pyc CHANGED
Binary files a/src/genai/ideation_agent/utils/__pycache__/prompts.cpython-312.pyc and b/src/genai/ideation_agent/utils/__pycache__/prompts.cpython-312.pyc differ
 
src/genai/ideation_agent/utils/prompts.py CHANGED
@@ -1,8 +1,105 @@
1
 
 
 
 
2
 
 
 
3
 
 
4
 
5
- def ideator_prompt(state):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
  return f"""
7
  You are a **bold, imaginative, and culturally-attuned video ideator** trusted by top global brands to craft **crisp, original, and high-impact video concepts** for social platforms like TikTok, Instagram, and YouTube.
8
 
@@ -67,7 +164,7 @@ Now, based on the business details above **and inspired by the 10 IMDb reference
67
 
68
 
69
 
70
- def critic_prompt(state):
71
  return f"""
72
  You are a sharp, imaginative, and detail-oriented **video idea critic**. You’ve been assigned to **critique 4 video ideas** created by another ideator and then **refine or improve** them.
73
  If some **valid and usuable information** of image is provided to you by the user, you have to **strongly focus** in that information of image because that is the reference of what type of idea does the user wants.
 
1
 
2
def ideator_prompt(state):
    """Build the reference-grounded ideator prompt.

    Renders a system prompt instructing the LLM to produce exactly 4
    forty-word video concepts derived strictly from 10 IMDb reference
    ideas, tied to the latest business details.

    Args:
        state: agent state object; only ``state.business_details[-1]``
            (the most recent business-details entry) is interpolated.
            NOTE(review): exact state type not visible here — confirm.

    Returns:
        The formatted prompt string.
    """
    return f"""
You are a **precision-focused, culturally-attuned video ideator** trusted by top global brands to craft **crisp, reference-grounded, and high-impact video concepts** for social platforms like TikTok, Instagram, and YouTube.

Your task is to create **exactly 4 highly original video ideas**, each in **exactly 40 words**.
These are not scripts or taglines — they are **powerful conceptual seeds**: short, visual, emotionally charged ideas that can spark full videos.

---

### STRICT CREATIVE SOURCE:
You are provided **10 reference creative ideas** retrieved from IMDb movie descriptions.
These are your **ONLY valid creative source**.
- You MUST carefully read, analyze, and mix elements from these 10 ideas.
- Do not invent entirely new storylines beyond the styles, tones, emotions, and motifs in the IMDb ideas.
- All creativity must come from **blending, reimagining, and recombining** parts of the 10 reference ideas.
- Avoid hallucinating scenes or concepts that are unrelated to the references.

---

### Output Rules:
- **4 final ideas** only.
- Each idea must be **exactly 40 words**.
- Each idea must be **very different** from the others in plot, tone, and visual feel.
- Begin from an **unexpected or abstract scene**, but base it on the IMDb inspirations.
- Conclude each idea in a way that subtly connects to the **business details** below.
- Avoid repeating characters, settings, or themes across the 4 ideas.
- Must be cinematic, visually vivid, and emotionally engaging.

---

### Business Details:
{state.business_details[-1]}

---

Now, based strictly on the 10 IMDb ideas provided to you as your creative dataset —
**Analyze them, identify their story arcs, emotions, symbols, and pacing, then mix and adapt them into 4 completely original yet reference-rooted 40-word video concepts**.
Do not draw from anything outside the provided IMDb ideas.
Every idea must end in a subtle, creative tie-in to the business details.
"""
42
+
43
def critic_prompt(state):
    """Build the reference-grounded critic prompt.

    Renders a system prompt instructing the LLM to critique the ideator's
    4 video ideas and produce 4 improved forty-word versions, grounded
    strictly in the 10 IMDb reference ideas.

    Args:
        state: agent state object; ``state.business_details[-1]`` and
            ``state.ideator_response[-1]`` (latest entries) are
            interpolated. NOTE(review): exact state type not visible
            here — confirm.

    Returns:
        The formatted prompt string.
    """
    return f"""
You are a **precision-focused, detail-oriented video idea critic**. You’ve been assigned to **critique 4 video ideas** created by another ideator and then **refine or improve them strictly based on reference material provided**.

---

### STRICT CREATIVE SOURCE:
You are provided **10 reference creative ideas** retrieved from IMDb movie descriptions.
These are your **ONLY valid creative source**.
- All critique and improvements must be grounded in the tone, emotional beats, plot devices, and styles from these IMDb ideas.
- You may mix or adapt elements from multiple IMDb ideas, but you must not invent unrelated concepts.
- No hallucinations or off-reference scenarios.

If **valid image information** is provided, it must strongly guide your improvements, as it represents the reference style the user wants.

---

### Your job:
1. **Identify flaws collectively** across the 4 original ideas — repetition, weak twist, dull opening, poor link to business details, lack of diversity, etc.
2. **Improve or replace** each idea while keeping it aligned with the IMDb references, ensuring strong cinematic depth and emotional impact.
3. Maintain the **original’s core** if it’s already good, but refine for clarity, pacing, or strength where possible.

---

### Very Important Creative Guidelines:
- **Each improved idea must be exactly 40 words**.
- Begin with an **unexpected or abstract plot, character, or visual scene** (inspired from IMDb references) — never directly with the business context.
- Conclude each idea with a **creative and subtle tie-in** to the business details below.
- The 4 improved ideas must be **completely diverse** in theme, tone, and visual feel.
- Avoid repeating characters, locations, or similar situations across the 4 ideas.
- Must be cinematic, visually vivid, emotionally resonant, and platform-native (Instagram/TikTok/YouTube).
- Improvements must be **strictly reference-based** — no unrelated creativity.

---

### Business Details:
{state.business_details[-1]}

### Original Ideas by Ideator:
{state.ideator_response[-1]}

---

**Format your response like this:**

---
Faults: Faults in any of the 4 ideas of ideator

---
Improved Ideas from Critic:
**improved_first idea of 40 words**
**improved_second idea of 40 words**
**improved_third idea of 40 words**
**improved_fourth idea of 40 words**

---
"""
100
+
101
+
102
+ def ideator_prompt_old(state):
103
  return f"""
104
  You are a **bold, imaginative, and culturally-attuned video ideator** trusted by top global brands to craft **crisp, original, and high-impact video concepts** for social platforms like TikTok, Instagram, and YouTube.
105
 
 
164
 
165
 
166
 
167
+ def critic_prompt_old(state):
168
  return f"""
169
  You are a sharp, imaginative, and detail-oriented **video idea critic**. You’ve been assigned to **critique 4 video ideas** created by another ideator and then **refine or improve** them.
170
  If some **valid and usuable information** of image is provided to you by the user, you have to **strongly focus** in that information of image because that is the reference of what type of idea does the user wants.
src/genai/utils/__pycache__/models_loader.cpython-312.pyc CHANGED
Binary files a/src/genai/utils/__pycache__/models_loader.cpython-312.pyc and b/src/genai/utils/__pycache__/models_loader.cpython-312.pyc differ
 
src/genai/utils/models_loader.py CHANGED
@@ -58,6 +58,7 @@ ideator_llm = llm_anthropic
58
  critic_llm = llm_anthropic
59
  validator_llm = llm_anthropic
60
 
61
-
 
62
 
63
 
 
58
  critic_llm = llm_anthropic
59
  validator_llm = llm_anthropic
60
 
61
# Model identifiers added alongside the LLM handles above.
# NOTE(review): consumers of these names are not visible in this chunk —
# presumably the image-captioning and image-generation routers; confirm.
captioning_model = "meta-llama/llama-4-scout-17b-16e-instruct"
image_generation_model = "black-forest-labs/FLUX.1-schnell"
63
 
64