diff --git a/__pycache__/logger_config.cpython-311.pyc b/__pycache__/logger_config.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..36e00ec1cd2810d645bc9e5136e0210e474501e0 Binary files /dev/null and b/__pycache__/logger_config.cpython-311.pyc differ diff --git a/api/__pycache__/__init__.cpython-311.pyc b/api/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b4456106edbf3fa191b17ac8d12ca741a7e2e4d Binary files /dev/null and b/api/__pycache__/__init__.cpython-311.pyc differ diff --git a/api/__pycache__/backup_prompts.cpython-311.pyc b/api/__pycache__/backup_prompts.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bd42bbc9dfb9e43270402d169e73c2508ca5a12c Binary files /dev/null and b/api/__pycache__/backup_prompts.cpython-311.pyc differ diff --git a/api/__pycache__/main.cpython-311.pyc b/api/__pycache__/main.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7fdf1e44e0ccc7153550caca7b4ba910c57e0ad1 Binary files /dev/null and b/api/__pycache__/main.cpython-311.pyc differ diff --git a/api/__pycache__/stored_data.cpython-311.pyc b/api/__pycache__/stored_data.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f95ba890652f342847a214fc0fab3c00f2e7ba52 Binary files /dev/null and b/api/__pycache__/stored_data.cpython-311.pyc differ diff --git a/api/routers/__pycache__/__init__.cpython-311.pyc b/api/routers/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ca9b079d9307f8849b42ab9e46f3f8ba6a7ffeb Binary files /dev/null and b/api/routers/__pycache__/__init__.cpython-311.pyc differ diff --git a/api/routers/__pycache__/brainstorm.cpython-311.pyc b/api/routers/__pycache__/brainstorm.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2cc256c27963d9a27a98af9d1285306361b38499 Binary files /dev/null and 
b/api/routers/__pycache__/brainstorm.cpython-311.pyc differ diff --git a/api/routers/__pycache__/context_analysis.cpython-311.pyc b/api/routers/__pycache__/context_analysis.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e253849b6146b6de57327d2bad849d8c760eb925 Binary files /dev/null and b/api/routers/__pycache__/context_analysis.cpython-311.pyc differ diff --git a/api/routers/__pycache__/generate_final_story.cpython-311.pyc b/api/routers/__pycache__/generate_final_story.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52c4895d0408178e0594b4023d61fa39271b7305 Binary files /dev/null and b/api/routers/__pycache__/generate_final_story.cpython-311.pyc differ diff --git a/api/routers/__pycache__/generate_image.cpython-311.pyc b/api/routers/__pycache__/generate_image.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1ff251092c8b3d9d9847a81238b1bf85b1946aa Binary files /dev/null and b/api/routers/__pycache__/generate_image.cpython-311.pyc differ diff --git a/api/routers/__pycache__/human_idea_refining.cpython-311.pyc b/api/routers/__pycache__/human_idea_refining.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f95a2789435ca51f4b4cf2cb08af327df0d732d4 Binary files /dev/null and b/api/routers/__pycache__/human_idea_refining.cpython-311.pyc differ diff --git a/api/routers/__pycache__/ideation.cpython-311.pyc b/api/routers/__pycache__/ideation.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9609dbe075a368edfb298ad3683a68a1330d308d Binary files /dev/null and b/api/routers/__pycache__/ideation.cpython-311.pyc differ diff --git a/api/routers/__pycache__/orchestration.cpython-311.pyc b/api/routers/__pycache__/orchestration.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d7e7e06c0de78d5d2946e9e68ff28a0a56c5d2e Binary files /dev/null and 
b/api/routers/__pycache__/orchestration.cpython-311.pyc differ diff --git a/api/routers/__pycache__/show_analytics.cpython-311.pyc b/api/routers/__pycache__/show_analytics.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..20b43e58f91abaf86f5ac96e1615fa31c6db45cb Binary files /dev/null and b/api/routers/__pycache__/show_analytics.cpython-311.pyc differ diff --git a/api/routers/context_analysis.py b/api/routers/context_analysis.py index 2db856c558b5db281c562e72954b85fc8c8be1b3..9ea5f84aad46510f1ba51cb7b7337701a02cb08c 100644 --- a/api/routers/context_analysis.py +++ b/api/routers/context_analysis.py @@ -27,8 +27,9 @@ def context_analysis(msg: UserMessage): app_logger.info('Executed context analysis agent') last_response = context_analysis_graph.messages[-1]["content"] + print('Last-Response:', last_response) - if context_analysis_graph.is_complete(last_response): + if context_analysis_graph.is_complete(last_response) == 'complete': app_logger.info('Context analysis completed.') try: details = context_analysis_graph.extract_details() diff --git a/api/routers/ideation.py b/api/routers/ideation.py index c0bcee4a5b69d5d3c1510df87b1cc7573526370e..6885515ee5334d7efe314a0e0fc8486586f67066 100644 --- a/api/routers/ideation.py +++ b/api/routers/ideation.py @@ -14,7 +14,7 @@ idea_graph = agent.ideation_graph() @router.post("/ideation") def ideation_endpoint(): - config={"recursion_limit":15, "configurable": {"thread_id": "ideation_thread123"}} + config={"recursion_limit":25, "configurable": {"thread_id": "ideation_thread123"}} try: result = idea_graph.invoke( { @@ -23,11 +23,11 @@ def ideation_endpoint(): }, config=config) - stored_data['final_ideation'] = result['formatted_response'][-1] - stored_data['final_ideation']=ast.literal_eval(stored_data['final_ideation']) + stored_data['final_ideation'] = result['top_4_ideas'][-1] + # stored_data['final_ideation']=ast.literal_eval(stored_data['final_ideation']) app_logger.info('Executed the 
ideation pipeline.') - return {'response':ast.literal_eval(result['formatted_response'][-1])} + return {'response':result['top_4_ideas'][-1]} except GraphRecursionError: error_logger.error('Ideation loop ran more than specified.') diff --git a/api/schemas/__pycache__/__init__.cpython-311.pyc b/api/schemas/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1c3a06968f2bcd2662b04c48b94e7e5d101787f9 Binary files /dev/null and b/api/schemas/__pycache__/__init__.cpython-311.pyc differ diff --git a/api/schemas/__pycache__/brainstorming.cpython-311.pyc b/api/schemas/__pycache__/brainstorming.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1381f15645acb756eca1c10c6ee4319f9893bc2 Binary files /dev/null and b/api/schemas/__pycache__/brainstorming.cpython-311.pyc differ diff --git a/api/schemas/__pycache__/context_analysis.cpython-311.pyc b/api/schemas/__pycache__/context_analysis.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9e1d8101a7e30e3e10e081ef0bc569ba8726cd6 Binary files /dev/null and b/api/schemas/__pycache__/context_analysis.cpython-311.pyc differ diff --git a/api/schemas/__pycache__/human_idea_refining.cpython-311.pyc b/api/schemas/__pycache__/human_idea_refining.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ae6ccb584f85184c765cff43a814e89c557bdf7 Binary files /dev/null and b/api/schemas/__pycache__/human_idea_refining.cpython-311.pyc differ diff --git a/api/schemas/__pycache__/orchestration.cpython-311.pyc b/api/schemas/__pycache__/orchestration.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a6fad842e342d41083a1738b11532630247d29b Binary files /dev/null and b/api/schemas/__pycache__/orchestration.cpython-311.pyc differ diff --git a/check.py b/check.py new file mode 100644 index 
0000000000000000000000000000000000000000..af1cbd9ef78a45bb9b0807529f0bf66ef367c63b --- /dev/null +++ b/check.py @@ -0,0 +1,13 @@ +from src.genai.utils.models_loader import llm_gpt +from src.genai.ideation_agent.utils.prompts import ideator_prompt_v3 +from src.genai.ideation_agent.utils.schemas import ideation_json_schema +from api.stored_data import stored_data +from langchain_core.messages import HumanMessage , SystemMessage + +template =ideator_prompt_v3(stored_data['business_details']) +messages = [SystemMessage(content=template), + HumanMessage(content=f'''The business_details is\n{stored_data['business_details']}\n + The information of the image is:\n''')] + +response = llm_gpt.with_structured_output(ideation_json_schema).invoke(messages) +print(response) \ No newline at end of file diff --git a/logs/access.log b/logs/access.log index 02cd7fb3b2f94e24587c0343d37245e2c9e78e7f..12a075a3a2867f1ad85162354bd6761d220f2e00 100644 --- a/logs/access.log +++ b/logs/access.log @@ -381,3 +381,104 @@ 2025-08-17 15:22:52,619 | INFO | access_logger | api\main.py:21 | Response status: 200 2025-08-17 15:23:25,844 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/ideation 2025-08-17 15:24:08,120 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-19 16:02:02,820 | INFO | access_logger | api\main.py:19 | Request: GET http://127.0.0.1:8000/docs +2025-08-19 16:02:02,847 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-19 16:02:03,185 | INFO | access_logger | api\main.py:19 | Request: GET http://127.0.0.1:8000/openapi.json +2025-08-19 16:02:03,669 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-19 16:02:14,935 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/ideation +2025-08-19 16:12:43,018 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/ideation +2025-08-19 16:30:36,700 | INFO | access_logger | api\main.py:19 | 
Request: POST http://127.0.0.1:8000/api/ideation +2025-08-19 16:39:11,277 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/ideation +2025-08-19 17:15:35,457 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/ideation +2025-08-20 12:26:51,641 | INFO | access_logger | api\main.py:19 | Request: GET http://127.0.0.1:8000/docs +2025-08-20 12:26:51,650 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-20 12:26:52,199 | INFO | access_logger | api\main.py:19 | Request: GET http://127.0.0.1:8000/openapi.json +2025-08-20 12:26:52,324 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-20 12:26:58,811 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/ideation +2025-08-20 12:49:37,673 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/ideation +2025-08-20 14:02:48,043 | INFO | access_logger | api\main.py:19 | Request: GET http://127.0.0.1:8000/openapi.json +2025-08-20 14:02:48,353 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-20 14:04:00,398 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/ideation +2025-08-20 14:18:25,067 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/ideation +2025-08-20 14:19:26,780 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-20 14:22:59,676 | INFO | access_logger | api\main.py:19 | Request: GET http://127.0.0.1:8000/docs +2025-08-20 14:22:59,678 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-20 14:23:02,148 | INFO | access_logger | api\main.py:19 | Request: GET http://127.0.0.1:8000/openapi.json +2025-08-20 14:23:02,477 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-20 14:34:06,588 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/ideation +2025-08-20 14:41:22,386 | INFO | access_logger | 
api\main.py:19 | Request: POST http://127.0.0.1:8000/api/ideation +2025-08-20 14:42:13,807 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-20 15:48:39,672 | INFO | access_logger | api\main.py:19 | Request: GET http://127.0.0.1:8000/openapi.json +2025-08-20 15:48:40,001 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-20 15:48:46,301 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/ideation +2025-08-20 15:49:43,697 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-22 15:13:40,245 | INFO | access_logger | api\main.py:19 | Request: GET http://127.0.0.1:8000/docs +2025-08-22 15:13:40,339 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-22 15:13:41,295 | INFO | access_logger | api\main.py:19 | Request: GET http://127.0.0.1:8000/openapi.json +2025-08-22 15:13:42,007 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-22 15:14:26,827 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/context-analysis +2025-08-22 15:14:26,928 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-22 15:14:46,025 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/context-analysis +2025-08-22 15:14:46,028 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-22 15:15:05,845 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/context-analysis +2025-08-22 15:15:05,847 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-22 15:15:35,529 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/context-analysis +2025-08-22 15:15:35,531 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-22 15:15:57,354 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/context-analysis +2025-08-22 15:15:57,361 | INFO | access_logger | api\main.py:21 | 
Response status: 200 +2025-08-22 15:16:10,105 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/context-analysis +2025-08-22 15:16:10,108 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-22 15:16:58,064 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/context-analysis +2025-08-22 15:16:58,067 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-22 15:17:22,771 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/context-analysis +2025-08-22 15:17:22,773 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-22 15:18:03,110 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/context-analysis +2025-08-22 15:18:03,113 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-22 15:18:29,059 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/context-analysis +2025-08-22 15:18:29,061 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-22 15:20:52,901 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/context-analysis +2025-08-22 15:20:52,904 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-22 16:03:29,119 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/context-analysis +2025-08-22 16:03:29,213 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-22 16:03:43,890 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/context-analysis +2025-08-22 16:03:43,893 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-22 16:03:57,411 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/context-analysis +2025-08-22 16:03:57,413 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-22 16:04:11,077 | INFO | access_logger | api\main.py:19 | 
Request: POST http://127.0.0.1:8000/api/context-analysis +2025-08-22 16:04:11,080 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-22 16:04:22,746 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/context-analysis +2025-08-22 16:04:22,749 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-22 16:04:37,207 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/context-analysis +2025-08-22 16:04:37,284 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-22 16:04:52,517 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/context-analysis +2025-08-22 16:04:52,519 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-22 16:05:23,142 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/context-analysis +2025-08-22 16:05:23,144 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-22 16:05:42,717 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/context-analysis +2025-08-22 16:05:42,719 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-22 16:28:12,070 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/context-analysis +2025-08-22 16:28:12,096 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-22 16:32:35,078 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/context-analysis +2025-08-22 16:32:35,170 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-22 16:32:47,773 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/context-analysis +2025-08-22 16:32:47,775 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-22 16:32:59,712 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/context-analysis +2025-08-22 16:32:59,714 | INFO | 
access_logger | api\main.py:21 | Response status: 200 +2025-08-22 16:33:23,642 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/context-analysis +2025-08-22 16:33:23,644 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-22 16:33:34,480 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/context-analysis +2025-08-22 16:33:34,482 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-22 16:33:45,378 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/context-analysis +2025-08-22 16:33:45,380 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-22 16:34:02,049 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/context-analysis +2025-08-22 16:34:02,051 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-22 16:34:16,163 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/context-analysis +2025-08-22 16:34:16,165 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-22 16:34:30,074 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/context-analysis +2025-08-22 16:34:30,076 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-22 16:34:49,371 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/context-analysis +2025-08-22 16:34:49,373 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-22 16:40:10,586 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/context-analysis +2025-08-22 16:40:10,680 | INFO | access_logger | api\main.py:21 | Response status: 200 +2025-08-22 16:45:45,350 | INFO | access_logger | api\main.py:19 | Request: POST http://127.0.0.1:8000/api/context-analysis +2025-08-22 16:45:46,127 | INFO | access_logger | api\main.py:21 | Response status: 200 diff --git a/logs/app.log 
b/logs/app.log index 944fc4dd5809a7232c9241ce608da1a63e7e40dd..210301e67c2cce7f1c7e8ec4abca0b5c35715c96 100644 --- a/logs/app.log +++ b/logs/app.log @@ -33,3 +33,36 @@ 2025-08-17 14:13:45,517 | INFO | app_logger | api\routers\show_analytics.py:14 | Influencer Analytics returned by orchestrator. 2025-08-17 15:22:52,597 | INFO | app_logger | api\routers\show_analytics.py:14 | Influencer Analytics returned by orchestrator. 2025-08-17 15:24:08,119 | INFO | app_logger | api\routers\ideation.py:28 | Executed the ideation pipeline. +2025-08-20 14:42:13,597 | INFO | app_logger | api\routers\ideation.py:28 | Executed the ideation pipeline. +2025-08-20 15:49:43,187 | INFO | app_logger | api\routers\ideation.py:28 | Executed the ideation pipeline. +2025-08-22 15:14:30,556 | INFO | app_logger | api\routers\context_analysis.py:27 | Executed context analysis agent +2025-08-22 15:14:47,390 | INFO | app_logger | api\routers\context_analysis.py:27 | Executed context analysis agent +2025-08-22 15:15:08,142 | INFO | app_logger | api\routers\context_analysis.py:27 | Executed context analysis agent +2025-08-22 15:15:37,181 | INFO | app_logger | api\routers\context_analysis.py:27 | Executed context analysis agent +2025-08-22 15:15:58,251 | INFO | app_logger | api\routers\context_analysis.py:27 | Executed context analysis agent +2025-08-22 15:16:11,455 | INFO | app_logger | api\routers\context_analysis.py:27 | Executed context analysis agent +2025-08-22 15:16:59,158 | INFO | app_logger | api\routers\context_analysis.py:27 | Executed context analysis agent +2025-08-22 15:17:26,486 | INFO | app_logger | api\routers\context_analysis.py:27 | Executed context analysis agent +2025-08-22 15:18:06,522 | INFO | app_logger | api\routers\context_analysis.py:27 | Executed context analysis agent +2025-08-22 15:18:30,038 | INFO | app_logger | api\routers\context_analysis.py:27 | Executed context analysis agent +2025-08-22 15:20:54,775 | INFO | app_logger | api\routers\context_analysis.py:27 | Executed 
context analysis agent +2025-08-22 16:03:31,947 | INFO | app_logger | api\routers\context_analysis.py:27 | Executed context analysis agent +2025-08-22 16:03:45,991 | INFO | app_logger | api\routers\context_analysis.py:27 | Executed context analysis agent +2025-08-22 16:03:59,045 | INFO | app_logger | api\routers\context_analysis.py:27 | Executed context analysis agent +2025-08-22 16:04:12,079 | INFO | app_logger | api\routers\context_analysis.py:27 | Executed context analysis agent +2025-08-22 16:04:24,354 | INFO | app_logger | api\routers\context_analysis.py:27 | Executed context analysis agent +2025-08-22 16:04:38,884 | INFO | app_logger | api\routers\context_analysis.py:27 | Executed context analysis agent +2025-08-22 16:04:54,600 | INFO | app_logger | api\routers\context_analysis.py:27 | Executed context analysis agent +2025-08-22 16:05:24,378 | INFO | app_logger | api\routers\context_analysis.py:27 | Executed context analysis agent +2025-08-22 16:05:46,373 | INFO | app_logger | api\routers\context_analysis.py:27 | Executed context analysis agent +2025-08-22 16:28:13,344 | INFO | app_logger | api\routers\context_analysis.py:27 | Executed context analysis agent +2025-08-22 16:32:37,212 | INFO | app_logger | api\routers\context_analysis.py:27 | Executed context analysis agent +2025-08-22 16:32:48,691 | INFO | app_logger | api\routers\context_analysis.py:27 | Executed context analysis agent +2025-08-22 16:33:01,635 | INFO | app_logger | api\routers\context_analysis.py:27 | Executed context analysis agent +2025-08-22 16:33:24,859 | INFO | app_logger | api\routers\context_analysis.py:27 | Executed context analysis agent +2025-08-22 16:33:35,725 | INFO | app_logger | api\routers\context_analysis.py:27 | Executed context analysis agent +2025-08-22 16:33:46,403 | INFO | app_logger | api\routers\context_analysis.py:27 | Executed context analysis agent +2025-08-22 16:34:03,128 | INFO | app_logger | api\routers\context_analysis.py:27 | Executed context analysis agent 
+2025-08-22 16:34:17,796 | INFO | app_logger | api\routers\context_analysis.py:27 | Executed context analysis agent +2025-08-22 16:34:33,195 | INFO | app_logger | api\routers\context_analysis.py:27 | Executed context analysis agent +2025-08-22 16:34:51,470 | INFO | app_logger | api\routers\context_analysis.py:27 | Executed context analysis agent diff --git a/logs/errors.log b/logs/errors.log index 16a78e6183f72a1da170079c07496e91a45f7509..f93013567211b71b3a7d436f1d74fe0cfcc5f03f 100644 --- a/logs/errors.log +++ b/logs/errors.log @@ -1 +1,2 @@ 2025-08-13 12:14:46,509 | ERROR | error_logger | Error while showing analytics: retrieve_data_for_analytics() missing 1 required positional argument: 'business_details' +2025-08-20 14:19:20,731 | ERROR | error_logger | api\routers\ideation.py:33 | Ideation loop ran more than specified. diff --git a/requirements.txt b/requirements.txt index f3cd81bb1051f95a2829f5bc02b5211cce37b4c3..49c1f6f1d8554d87928f29a926c1c53f9522fa37 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,7 +2,7 @@ langgraph langsmith langchain_groq -pydantic +pydantic==2.11.7 datasets faiss-cpu dotenv diff --git a/src/genai/__pycache__/__init__.cpython-311.pyc b/src/genai/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..683fe3a2a5b1f1072c6dd1797ea7747dd9fb18c5 Binary files /dev/null and b/src/genai/__pycache__/__init__.cpython-311.pyc differ diff --git a/src/genai/brainstroming_agent/__pycache__/__init__.cpython-311.pyc b/src/genai/brainstroming_agent/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c0415a41e5f4ddafe18f640f96196bbc8914a429 Binary files /dev/null and b/src/genai/brainstroming_agent/__pycache__/__init__.cpython-311.pyc differ diff --git a/src/genai/brainstroming_agent/__pycache__/agent.cpython-311.pyc b/src/genai/brainstroming_agent/__pycache__/agent.cpython-311.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..08dc8338a0fa971f72ae0769f4c7e7ce89fc593a Binary files /dev/null and b/src/genai/brainstroming_agent/__pycache__/agent.cpython-311.pyc differ diff --git a/src/genai/brainstroming_agent/utils/__pycache__/__init__.cpython-311.pyc b/src/genai/brainstroming_agent/utils/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2f5eb0ef0c4eab69c8c87db221c42ef01a59e41b Binary files /dev/null and b/src/genai/brainstroming_agent/utils/__pycache__/__init__.cpython-311.pyc differ diff --git a/src/genai/brainstroming_agent/utils/__pycache__/nodes.cpython-311.pyc b/src/genai/brainstroming_agent/utils/__pycache__/nodes.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ba97e1f8f39472b36a4ad0f798e1a108a544ac9 Binary files /dev/null and b/src/genai/brainstroming_agent/utils/__pycache__/nodes.cpython-311.pyc differ diff --git a/src/genai/brainstroming_agent/utils/__pycache__/nodes.cpython-312.pyc b/src/genai/brainstroming_agent/utils/__pycache__/nodes.cpython-312.pyc index d7049536dab310d967f83ed31603f1d3be01bea3..9c0e7526572ec0d7b48d1cdae0b7151bbe0dc599 100644 Binary files a/src/genai/brainstroming_agent/utils/__pycache__/nodes.cpython-312.pyc and b/src/genai/brainstroming_agent/utils/__pycache__/nodes.cpython-312.pyc differ diff --git a/src/genai/brainstroming_agent/utils/__pycache__/prompts.cpython-311.pyc b/src/genai/brainstroming_agent/utils/__pycache__/prompts.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..85d085a8bd67a219d60d47b0df33539980f8bb5f Binary files /dev/null and b/src/genai/brainstroming_agent/utils/__pycache__/prompts.cpython-311.pyc differ diff --git a/src/genai/brainstroming_agent/utils/__pycache__/state.cpython-311.pyc b/src/genai/brainstroming_agent/utils/__pycache__/state.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae54bdf70e2b97ea5a8a3755c9a97498fa81dc54 
Binary files /dev/null and b/src/genai/brainstroming_agent/utils/__pycache__/state.cpython-311.pyc differ diff --git a/src/genai/brainstroming_agent/utils/__pycache__/tools.cpython-311.pyc b/src/genai/brainstroming_agent/utils/__pycache__/tools.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d0b0d7156a18dc02fad4e453cbe6f041e8a9ec0e Binary files /dev/null and b/src/genai/brainstroming_agent/utils/__pycache__/tools.cpython-311.pyc differ diff --git a/src/genai/brainstroming_agent/utils/__pycache__/tools.cpython-312.pyc b/src/genai/brainstroming_agent/utils/__pycache__/tools.cpython-312.pyc index 20c3b3e8e389784ce4832ddd45c71bf3117d79ef..c814534bb18cd3cb606895f9a2e4ceb195ed5e87 100644 Binary files a/src/genai/brainstroming_agent/utils/__pycache__/tools.cpython-312.pyc and b/src/genai/brainstroming_agent/utils/__pycache__/tools.cpython-312.pyc differ diff --git a/src/genai/brainstroming_agent/utils/__pycache__/utils.cpython-311.pyc b/src/genai/brainstroming_agent/utils/__pycache__/utils.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..405aa47ecc46a1ef2c132371a103a66eeffb4948 Binary files /dev/null and b/src/genai/brainstroming_agent/utils/__pycache__/utils.cpython-311.pyc differ diff --git a/src/genai/context_analysis_agent/__pycache__/__init__.cpython-311.pyc b/src/genai/context_analysis_agent/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9026c3626c1f6c59c5d32ca39dedeaadf790ea84 Binary files /dev/null and b/src/genai/context_analysis_agent/__pycache__/__init__.cpython-311.pyc differ diff --git a/src/genai/context_analysis_agent/__pycache__/agent.cpython-311.pyc b/src/genai/context_analysis_agent/__pycache__/agent.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd16f6b833777fd9537bc929ab1496cf773ca06a Binary files /dev/null and b/src/genai/context_analysis_agent/__pycache__/agent.cpython-311.pyc differ 
diff --git a/src/genai/context_analysis_agent/agent.py b/src/genai/context_analysis_agent/agent.py index 89cd42d2d4deb8c16a410a995df6259b9eec1190..613b0a3f2aa53a4748a2bb723985072162e5a036 100644 --- a/src/genai/context_analysis_agent/agent.py +++ b/src/genai/context_analysis_agent/agent.py @@ -1,10 +1,12 @@ import logging +from langchain_core.messages import SystemMessage , HumanMessage from langgraph.graph import StateGraph, MessagesState, START, END from langgraph.checkpoint.memory import MemorySaver -from .utils.state import State +from .utils.state import State, CompletionFormatter from .utils.nodes import IntroductionNode from .utils.utils import DetailsExtractor from src.genai.utils.models_loader import llm_gpt +from .utils.prompts import completion_check_prompt business_state = State() @@ -37,7 +39,10 @@ class IntroductionChatbot: def is_complete(self, latest_response: str) -> bool: - return "Thanks for providing all your required business details" in latest_response + messages = [SystemMessage(content=completion_check_prompt()),HumanMessage(content=f'''The response of assistant is: {latest_response}''')] + response = llm_gpt.with_structured_output(CompletionFormatter).invoke(messages) + print('Completion response:', response.completion) + return response.completion def extract_details(self): response = DetailsExtractor(business_state.interactions).run() diff --git a/src/genai/context_analysis_agent/utils/__pycache__/__init__.cpython-311.pyc b/src/genai/context_analysis_agent/utils/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fdac64027da1462ee66cf1a3e261627e8ab2220d Binary files /dev/null and b/src/genai/context_analysis_agent/utils/__pycache__/__init__.cpython-311.pyc differ diff --git a/src/genai/context_analysis_agent/utils/__pycache__/nodes.cpython-311.pyc b/src/genai/context_analysis_agent/utils/__pycache__/nodes.cpython-311.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..c76d16d198707030a9eb66aa6d3f6556a014e4a9 Binary files /dev/null and b/src/genai/context_analysis_agent/utils/__pycache__/nodes.cpython-311.pyc differ diff --git a/src/genai/context_analysis_agent/utils/__pycache__/prompts.cpython-311.pyc b/src/genai/context_analysis_agent/utils/__pycache__/prompts.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91ca8f31952762af4a1072ee0093e1b0c013d5e3 Binary files /dev/null and b/src/genai/context_analysis_agent/utils/__pycache__/prompts.cpython-311.pyc differ diff --git a/src/genai/context_analysis_agent/utils/__pycache__/prompts.cpython-312.pyc b/src/genai/context_analysis_agent/utils/__pycache__/prompts.cpython-312.pyc index e17e87150ac37dfa603bf47c4edea4ad7beb2c6c..d18ffc19965ef7150c1e492f4576cad274ecf074 100644 Binary files a/src/genai/context_analysis_agent/utils/__pycache__/prompts.cpython-312.pyc and b/src/genai/context_analysis_agent/utils/__pycache__/prompts.cpython-312.pyc differ diff --git a/src/genai/context_analysis_agent/utils/__pycache__/state.cpython-311.pyc b/src/genai/context_analysis_agent/utils/__pycache__/state.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7b93ad4384c64e22e2e4799ee37c84206c0b931 Binary files /dev/null and b/src/genai/context_analysis_agent/utils/__pycache__/state.cpython-311.pyc differ diff --git a/src/genai/context_analysis_agent/utils/__pycache__/state.cpython-312.pyc b/src/genai/context_analysis_agent/utils/__pycache__/state.cpython-312.pyc index 1f2bd8fe1a40ece168cc53fc8781b1cf67ea2332..d3d2d3a482875996cd3fcfda86dd242c4e72c709 100644 Binary files a/src/genai/context_analysis_agent/utils/__pycache__/state.cpython-312.pyc and b/src/genai/context_analysis_agent/utils/__pycache__/state.cpython-312.pyc differ diff --git a/src/genai/context_analysis_agent/utils/__pycache__/utils.cpython-311.pyc b/src/genai/context_analysis_agent/utils/__pycache__/utils.cpython-311.pyc new file 
mode 100644 index 0000000000000000000000000000000000000000..42db3daa5b62370565e229478779d35ce23a634e Binary files /dev/null and b/src/genai/context_analysis_agent/utils/__pycache__/utils.cpython-311.pyc differ diff --git a/src/genai/context_analysis_agent/utils/nodes.py b/src/genai/context_analysis_agent/utils/nodes.py index 5046f53865c63f9091b5e9eb73dd1c4c5d4a01a8..d953baea0efd7c828ba3d06df7aa11817dbe3cd0 100644 --- a/src/genai/context_analysis_agent/utils/nodes.py +++ b/src/genai/context_analysis_agent/utils/nodes.py @@ -1,6 +1,7 @@ from langchain_core.messages import SystemMessage from src.genai.utils.models_loader import llm_gpt from .prompts import introduction_prompt +from .state import ConversationFormatter class IntroductionNode: @@ -10,8 +11,13 @@ class IntroductionNode: def run(self, state, llm): template = introduction_prompt messages = [SystemMessage(content=template)] + state["messages"] - response = llm.invoke(messages) - return {"messages": [response]} + response = llm.with_structured_output(ConversationFormatter).invoke(messages) + print('The response:', response) + print('Type of response:', type(response)) + if 'True' in response.complete: + return {'messages':['completed']} + else: + return {"messages": [response.response]} diff --git a/src/genai/context_analysis_agent/utils/prompts.py b/src/genai/context_analysis_agent/utils/prompts.py index 234db20f5fdfc9514fc2e03cb4650b15bde55378..48837d79fae0dc180d4caecaa9921d4d87cedfcc 100644 --- a/src/genai/context_analysis_agent/utils/prompts.py +++ b/src/genai/context_analysis_agent/utils/prompts.py @@ -1,38 +1,45 @@ introduction_prompt = ''' -You are a business assistant who collects only valid and relevant data. +You are a business assistant who collects only valid and relevant data of the brands. +Those brands connects with influencers to grow their business. Your job is to gather details from business owners in a friendly and conversational manner to understand their business better. 
Ask in very easy and short way. No matter what the user asks, you have to say to user that we have to collect these details first and only you can move forward. If user asks you for some other queries related to influencers, marketing, video ideas etc or anything. Don't say i'm not here to help you. Just say, First i will collect your all the details and only can help you analyzing your details. You have to say user to be patient until al the details are collected. We need these details: -1. Business Type (e.g., e-commerce, SaaS, consulting), +1. Business Type and Name (e.g., e-commerce, SaaS, consulting), 2. Platform(s) used (e.g., website, app, Instagram), 3. Target Audience (who are their customers or clients), 4. Business Goals (short-term or long-term objectives), 5. Offerings (products or services they provide), -6. Challenges faced (any current business problems or limitations). +6. Devices used to create the videos.(smart phones , camera , high end gadgets etc.) +7. Challenges faced (any current business problems or limitations). +8. Lastly, ask about any queries or details that the business wants to say regarding their video idea and content creation. This is kind of additional details. Keep interacting until all valid details are collected. VERY IMPORTANT: Once all valid details are received, say: '**Thanks for providing all your required business details.**' +Give the output in structured format like this: +response: Your complete response here for details collection +complete: True or False. If the details extraction is completed, return True. If not completed return False. ''' +def completion_check_prompt(): + return f''' +You are a perfect checker who checks whether the assistant has ended the conversation with user or not by analyzing it's response. +If the assistant has ended the conversation , it will say the message: **Thanks for providing your required business details** or something thank you message like this. 
+The assistant will not further ask any questions if the conversation is completed. If the assistant is asking any question , it means the conversation is not completed. +So you are working as an indicator here who gives the signal whether the conversation is **completed** or **not completed**. + +Output Format: +Just give the output in one word. Either **completed** or **not completed**. +''' def details_extract_prompt(interactions): - return( f''' You're provided with the messages of business interactions between the user and assistant. - Extract the following details of the business from the conversation in the form of dictionary. Don't give any further explanations either in the beginning or ending of the response. - The details you have to extract are:\n - - ----------********--------------- - "business_type": "...", - "platform": "...", - "target_audience": "...", - "business_goals": "...", - "offerings": "...", - "Challenges_faced": "..." - ----------********--------------- + return( f''' You're provided with the messages of business interactions between the business and AI assistant. + Extract the following details of the business from the conversation as it is. Don't trim or simplify any of the business details. You must not have to lose any information. + No problem if the details are long, but give the business details as it is from the conversation. 
Now, start doing your work:\n The conversation is:\n{interactions}\n diff --git a/src/genai/context_analysis_agent/utils/state.py b/src/genai/context_analysis_agent/utils/state.py index ae2adeea92545951023188c02a958ff7287ef2ff..0eda9ecdc40ae12201178649ee15041f2a20d4dd 100644 --- a/src/genai/context_analysis_agent/utils/state.py +++ b/src/genai/context_analysis_agent/utils/state.py @@ -10,9 +10,19 @@ class DetailsFormatter(BaseModel): ''' Format the details from the business interaction of User and Assistant ''' - business_type: str = Field(description="The type of the business") - platform: str = Field(description="The platform used for the business") + business_type_or_name: str = Field(description="The type or name of the business.") + platform: str = Field(description="The platform used for the promotion of business") target_audience: str = Field(description="The target audience of the business") business_goals: str = Field(description="The business goals of the business") offerings: str = Field(description="The offerings of the business") - Challenges_faced: str = Field(description="The challenges faced by the business") \ No newline at end of file + devices_used_to_create_videos: str = Field(description="The devices used to create videos.") + Challenges_faced: str = Field(description="The challenges faced by the business") + additional_informations: str = Field(description="Additional queries or details regarding the video or idea creation.") + +class CompletionFormatter(BaseModel): + completion: str = Field(description="Just the one word result: **completed** or **not completed**") + +class ConversationFormatter(BaseModel): + response: str = Field(description="The entire response of the assistant.") + complete: str = Field(description= "Return 'True' if the details extraction is completed. 
Return 'False' if not completed.") + diff --git a/src/genai/human_refined_ideation/__pycache__/__init__.cpython-311.pyc b/src/genai/human_refined_ideation/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..501f6aa7ec0697858bd482c15ddf73b37759b064 Binary files /dev/null and b/src/genai/human_refined_ideation/__pycache__/__init__.cpython-311.pyc differ diff --git a/src/genai/human_refined_ideation/__pycache__/agent.cpython-311.pyc b/src/genai/human_refined_ideation/__pycache__/agent.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90deec9261c807bd8b9ffe40cabd65f26ee0e9e3 Binary files /dev/null and b/src/genai/human_refined_ideation/__pycache__/agent.cpython-311.pyc differ diff --git a/src/genai/human_refined_ideation/utils/__pycache__/__init__.cpython-311.pyc b/src/genai/human_refined_ideation/utils/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..720ad1ad1e8c6d0122bedcd0881bc6a9e28336b7 Binary files /dev/null and b/src/genai/human_refined_ideation/utils/__pycache__/__init__.cpython-311.pyc differ diff --git a/src/genai/human_refined_ideation/utils/__pycache__/nodes.cpython-311.pyc b/src/genai/human_refined_ideation/utils/__pycache__/nodes.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bcd180284a51167447a6b3462044c87ab96a9f4e Binary files /dev/null and b/src/genai/human_refined_ideation/utils/__pycache__/nodes.cpython-311.pyc differ diff --git a/src/genai/human_refined_ideation/utils/__pycache__/nodes.cpython-312.pyc b/src/genai/human_refined_ideation/utils/__pycache__/nodes.cpython-312.pyc index 667e6f0eca1801a59342405f4fa3f5307ca4bbd6..624bd979922e70c214b4292076389f391b870d31 100644 Binary files a/src/genai/human_refined_ideation/utils/__pycache__/nodes.cpython-312.pyc and b/src/genai/human_refined_ideation/utils/__pycache__/nodes.cpython-312.pyc differ diff --git 
a/src/genai/human_refined_ideation/utils/__pycache__/prompts.cpython-311.pyc b/src/genai/human_refined_ideation/utils/__pycache__/prompts.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8551702eb1917d6f5196b85a40f5cef1ef03385b Binary files /dev/null and b/src/genai/human_refined_ideation/utils/__pycache__/prompts.cpython-311.pyc differ diff --git a/src/genai/human_refined_ideation/utils/__pycache__/state.cpython-311.pyc b/src/genai/human_refined_ideation/utils/__pycache__/state.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4fecca203730cf4988e9af9068f413bed14cce0e Binary files /dev/null and b/src/genai/human_refined_ideation/utils/__pycache__/state.cpython-311.pyc differ diff --git a/src/genai/ideation_agent/__pycache__/__init__.cpython-311.pyc b/src/genai/ideation_agent/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ed6d346302366f526fac83ff11aa38768d3ddc1 Binary files /dev/null and b/src/genai/ideation_agent/__pycache__/__init__.cpython-311.pyc differ diff --git a/src/genai/ideation_agent/__pycache__/agent.cpython-311.pyc b/src/genai/ideation_agent/__pycache__/agent.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..988e858fc68b7a6e8eace00e05972278058ad2d3 Binary files /dev/null and b/src/genai/ideation_agent/__pycache__/agent.cpython-311.pyc differ diff --git a/src/genai/ideation_agent/__pycache__/agent.cpython-312.pyc b/src/genai/ideation_agent/__pycache__/agent.cpython-312.pyc index 202d86789941f4e4c22cd274966c9a360825e9fd..87ba8cf2ee1b647f3945bc023b36c2708ea67abd 100644 Binary files a/src/genai/ideation_agent/__pycache__/agent.cpython-312.pyc and b/src/genai/ideation_agent/__pycache__/agent.cpython-312.pyc differ diff --git a/src/genai/ideation_agent/agent.py b/src/genai/ideation_agent/agent.py index 5ebab82632737b2eb2d3dbc2ad6d1b35894fb717..98d0bf932a83eacaa7e6dec9ef10d9be5f90410f 100644 --- 
a/src/genai/ideation_agent/agent.py +++ b/src/genai/ideation_agent/agent.py @@ -1,6 +1,6 @@ from langgraph.graph import StateGraph, START, END , MessagesState from .utils.state import State -from .utils.nodes import IdeatorNode , CriticNode , FormatResponseNode, Validators , RoutingsAfterValidation +from .utils.nodes import RetrieverNode, IdeatorNode , CriticNode , ValidatorNode , RoutingAfterValidation, JudgeNode1 , JudgeNode2, Aggregrator from langgraph.checkpoint.memory import MemorySaver class IdeationAgent: @@ -9,23 +9,27 @@ class IdeationAgent: def ideation_graph(self): graph_builder= StateGraph(State) + graph_builder.add_node("retriever", RetrieverNode().run) graph_builder.add_node("ideator", IdeatorNode().run) graph_builder.add_node("critic", CriticNode().run) - graph_builder.add_node("format_response", FormatResponseNode().run) - graph_builder.add_node("validator1", Validators().run_validator1) - graph_builder.add_node("validator2", Validators().run_validator2) + graph_builder.add_node("judge1", JudgeNode1().run) + graph_builder.add_node("judge2", JudgeNode2().run) + graph_builder.add_node("aggregrator", Aggregrator().run) + graph_builder.add_node("validator", ValidatorNode().run) - graph_builder.add_edge(START, "ideator") # Start the graph with node_1 + graph_builder.add_edge(START, "retriever") + graph_builder.add_edge("retriever", "ideator") graph_builder.add_edge("ideator", "critic") - graph_builder.add_edge("critic", "format_response") - graph_builder.add_edge("format_response", "validator1") - graph_builder.add_edge("validator1", "validator2") - graph_builder.add_edge("validator2", END) + graph_builder.add_edge("critic", "judge1") + graph_builder.add_edge("critic", "judge2") + graph_builder.add_edge("judge1", "aggregrator") + graph_builder.add_edge("judge2", "aggregrator") + graph_builder.add_edge("aggregrator", "validator") + graph_builder.add_edge("validator", END) # Use conditional routing from validator - 
graph_builder.add_conditional_edges("validator1", RoutingsAfterValidation().route1,{False:'critic',True:'validator2'}) - graph_builder.add_conditional_edges("validator2", RoutingsAfterValidation().route2,{False:'critic',True:END}) + graph_builder.add_conditional_edges("validator", RoutingAfterValidation().route,{False:'critic',True:END}) return graph_builder.compile(checkpointer=self.memory) diff --git a/src/genai/ideation_agent/utils/__pycache__/nodes.cpython-311.pyc b/src/genai/ideation_agent/utils/__pycache__/nodes.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..02e29220e1bc6e79c9837d3f71ca6067cfc53a69 Binary files /dev/null and b/src/genai/ideation_agent/utils/__pycache__/nodes.cpython-311.pyc differ diff --git a/src/genai/ideation_agent/utils/__pycache__/nodes.cpython-312.pyc b/src/genai/ideation_agent/utils/__pycache__/nodes.cpython-312.pyc index 8fcebbfb18b64b6a1393a548fb25f1706819fe79..a9638e11d7d02ca0db7185ed9b6bc7aabc154d7e 100644 Binary files a/src/genai/ideation_agent/utils/__pycache__/nodes.cpython-312.pyc and b/src/genai/ideation_agent/utils/__pycache__/nodes.cpython-312.pyc differ diff --git a/src/genai/ideation_agent/utils/__pycache__/prompts.cpython-311.pyc b/src/genai/ideation_agent/utils/__pycache__/prompts.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..321bedcef7e0a0d186ae0202c57590d3ac49657e Binary files /dev/null and b/src/genai/ideation_agent/utils/__pycache__/prompts.cpython-311.pyc differ diff --git a/src/genai/ideation_agent/utils/__pycache__/prompts.cpython-312.pyc b/src/genai/ideation_agent/utils/__pycache__/prompts.cpython-312.pyc index a6cdd422e90a44a88a1e2571a294ddf78f2f7d37..7912d88056ea739428cf15a653c8d3a6e704c013 100644 Binary files a/src/genai/ideation_agent/utils/__pycache__/prompts.cpython-312.pyc and b/src/genai/ideation_agent/utils/__pycache__/prompts.cpython-312.pyc differ diff --git a/src/genai/ideation_agent/utils/__pycache__/schemas.cpython-311.pyc 
b/src/genai/ideation_agent/utils/__pycache__/schemas.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a77d6cc2d6f4d0db8187a3b6f03c2872b9b6e340 Binary files /dev/null and b/src/genai/ideation_agent/utils/__pycache__/schemas.cpython-311.pyc differ diff --git a/src/genai/ideation_agent/utils/__pycache__/state.cpython-311.pyc b/src/genai/ideation_agent/utils/__pycache__/state.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..83c189c8032f09fa2e96e405310fabc1a38da079 Binary files /dev/null and b/src/genai/ideation_agent/utils/__pycache__/state.cpython-311.pyc differ diff --git a/src/genai/ideation_agent/utils/__pycache__/state.cpython-312.pyc b/src/genai/ideation_agent/utils/__pycache__/state.cpython-312.pyc index 384eaeb6a5707443b912ffc83be4162081103e53..9d9333ecbfbbf47a1b8b34d78f017c0a50f0373c 100644 Binary files a/src/genai/ideation_agent/utils/__pycache__/state.cpython-312.pyc and b/src/genai/ideation_agent/utils/__pycache__/state.cpython-312.pyc differ diff --git a/src/genai/ideation_agent/utils/__pycache__/tools.cpython-311.pyc b/src/genai/ideation_agent/utils/__pycache__/tools.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6f4a2b0a97d7acb06815241de05b926871b64fe7 Binary files /dev/null and b/src/genai/ideation_agent/utils/__pycache__/tools.cpython-311.pyc differ diff --git a/src/genai/ideation_agent/utils/nodes.py b/src/genai/ideation_agent/utils/nodes.py index 576400e2147bb793bdaa79cf43621e52379640c1..50b05a8c90dc8f7d2e62a71f95de1534697ec8ab 100644 --- a/src/genai/ideation_agent/utils/nodes.py +++ b/src/genai/ideation_agent/utils/nodes.py @@ -1,78 +1,108 @@ -from .state import State , ValidationFormatter , ImproverResponseFormatter, CriticResponseFormatter -from .tools import retrieve_imdb_ideas_tool , retrieve_influencers_data_tool +from .state import State , ValidationFormatter , CriticResponseFormatter +from .tools import Retrieval from 
langgraph.prebuilt import create_react_agent -from src.genai.utils.models_loader import ideator_llm, critic_llm , improver_llm , validator_llm -from langchain_core.messages import SystemMessage , HumanMessage -from .prompts import ideator_prompt , critic_prompt , improver_prompt , validator_prompt, idea_refinement_prompt +from src.genai.utils.models_loader import ideator_llm, critic_llm , improver_llm , validator_llm , judge1_llm , judge2_llm +from langchain_core.messages import SystemMessage , HumanMessage, FunctionMessage +from .prompts import ideator_prompt_v3 , critic_prompt_v3 , improver_prompt , validator_prompt, judge_prompt +from .schemas import ideation_json_schema , judge_response_json_schema +class RetrieverNode: + def __init__(self): + pass + + def run(self , state:State): + influencers_data = Retrieval(state.business_details[-1]).influencers_data() + state.influencers_data.append(influencers_data) + return state + class IdeatorNode: def __init__(self): - self.ideator_agent = create_react_agent(model=ideator_llm, tools=[retrieve_imdb_ideas_tool]) + self.llm = ideator_llm def run(self, state:State): - template =ideator_prompt(state) + template = ideator_prompt_v3() messages = [SystemMessage(content=template), HumanMessage(content=f'''The business_details is\n{state.business_details[-1]}\n - The information of the image is:\n{state.image_caption[-1]}''')] - try: - response = self.ideator_agent.invoke({'messages':messages}) - print('Ideator Response:',response) - response = response['messages'][-1].content - state.ideator_response.append(response) - print('Ideator Generated the story') - return state - except: - response = ideator_llm.invoke(messages) - print('Ideator backup Response:',response.content) - state.ideator_response.append(response.content) - return state - + The information of the image is:\n{state.image_caption[-1]}'''), + FunctionMessage(name='inf_data_ideator', content=f'''The data of influencers is:\n {state.influencers_data[-1]}\n''')] + 
response = self.llm.with_structured_output(ideation_json_schema).invoke(messages) + print('Ideator Response:', response) + state.ideator_response.append(str(response)) + print('Ideator Node executed') + return state + class CriticNode: def __init__(self): - self.critic_agent = create_react_agent(model=critic_llm, tools=[retrieve_imdb_ideas_tool]) + self.llm = critic_llm def run(self,state:State): - template = critic_prompt(state) + template = critic_prompt_v3() messages = [SystemMessage(content=template), - HumanMessage(content=f'''The business_details is\n{state.business_details[-1]}\n - The information of the image is:\n{state.image_caption[-1]}''')] - - try: - response = self.critic_agent.invoke({'messages':messages}) - response = response['messages'][-1].content - print('Critic Response:',response) - state.critic_response.append(response) - print('Critic Evaluated the story') - return state + HumanMessage(content=f'''The ideas generated by ideator are:\n{state.ideator_response[-1]}\n. + The business_details is\n{state.business_details[-1]}\n + The information of the image is:\n{state.image_caption[-1]}'''), + FunctionMessage(name='inf_data_critic', content=f'''The data of influencers is:\n {state.influencers_data[-1]}''')] - except: - response = critic_llm.invoke(messages) - print('Critic backup Response:',response.content) - state.critic_response.append(response.content) - return state + response = self.llm.with_structured_output(ideation_json_schema).invoke(messages) + state.critic_response.append(str(response)) + print('Critic Node executed') + return state + +class Judge: + def __init__(self, llm): + self.llm = llm + + def run (self, state:State): + template = judge_prompt(state) + messages = [SystemMessage(content=template), + HumanMessage(content=f'''The generated 10 ideas are:\n{state.critic_response[-1]}\n. 
+ The business_details is\n{state.business_details[-1]}\n + The information of image is:{state.image_caption[-1]}\n''')] + response = self.llm.with_structured_output(judge_response_json_schema).invoke(messages) + return response -class FormatResponseNode: +class JudgeNode1: def __init__(self): - self.response_list = [] + self.llm = judge1_llm + + def run (self, state:State): + response = Judge(self.llm).run(state) + return {'judge1_response':[response]} + +class JudgeNode2: + def __init__(self): + self.llm = judge2_llm def run(self, state:State): - messages = [SystemMessage(content='''Just extract all the 4 improved ideas and the faults of critic as it is from the critic's response'''), - HumanMessage(content=f'''The critic's response is: \n {state.critic_response[-1]}''')] - response = critic_llm.with_structured_output(CriticResponseFormatter).invoke(messages) - self.response_list.append(response.improved_idea1) - self.response_list.append(response.improved_idea2) - self.response_list.append(response.improved_idea3) - self.response_list.append(response.improved_idea4) - state.formatted_response.append(str(self.response_list)) - state.ideator_fault.append(response.faults) - print('Formatted Response:', self.response_list) - return state + response = Judge(self.llm).run(state) + return {'judge2_response':[response]} +class Aggregrator: + def __init__(self): + self.unique_ideas = {} + + def run(self, state:State): + all_selected_ideas = [ + *state.judge1_response[-1]['selected_ideas'], + *state.judge2_response[-1]['selected_ideas'] + ] -class Validators: + for idea in all_selected_ideas: + title = idea['title'] + if title not in self.unique_ideas or idea['scores']['total_score'] > self.unique_ideas[title]['scores']['total_score']: + self.unique_ideas[title] = idea + + unique_ideas_list = list(self.unique_ideas.values()) + top_4_ideas = sorted(unique_ideas_list, key=lambda x: x['scores']['total_score'], reverse=True)[:4] + print('Type of top 4 ideas:', 
type(top_4_ideas)) + print('Top 4 ideas are:', top_4_ideas) + state.top_4_ideas.append(top_4_ideas) + return state + +class ValidatorNode: def __init__(self): self.validator_llm1 = validator_llm self.validator_llm2 = improver_llm @@ -86,28 +116,20 @@ class Validators: return response - def run_validator1(self, state:State): + def run(self, state:State): response = self.get_response(state,self.validator_llm1) - state.validator1_response.append(response.result) - if 'not validated' in response.result: state.disagreement_reason.append(response.reason) - return state - - def run_validator2(self, state:State): - response = self.get_response(state,self.validator_llm2) - state.validator2_response.append(response.result) + state.validator_response.append(response.result) if 'not validated' in response.result: state.disagreement_reason.append(response.reason) return state -class RoutingsAfterValidation: +class RoutingAfterValidation: def __init__(self): pass - def route1(self, state:State): - return 'not validated' not in state.validator1_response[-1] + def route(self, state:State): + return 'not validated' not in state.validator_response[-1] - def route2(self, state:State): - return 'not validated' not in state.validator2_response[-1] diff --git a/src/genai/ideation_agent/utils/prompts.py b/src/genai/ideation_agent/utils/prompts.py index 3ffba4bd668016bbd2d465c5051ed612f2792e37..84a77855fe3c113b54229276a101397d9169d47b 100644 --- a/src/genai/ideation_agent/utils/prompts.py +++ b/src/genai/ideation_agent/utils/prompts.py @@ -1,5 +1,109 @@ +def ideator_prompt_v3(): + return f""" +You are Ideasmith Pro, a world-class video ideator trusted by top brands to craft crisp, simple, and high-impact short-form video concepts for TikTok, Instagram, and YouTube Shorts. +Your task is to create exactly 10 unique and highly creative video ideas (conceptual seeds, not full scripts). Each idea should be short, clear, and visually vivid. 
+I am working on a project where i have to give very unique, creative and feasible video ideas for tiktok and instagram reels to the nepali business houses by understanding their business details. + + +You will be given: +- Business details (from the human message): Focus more strongly on device_used_to_create_videos , challenges_faced and additional informations provided. +- Influencer data (from the function message): You can take it as a reference if it helps you. + +Output Rules: +- Respond in valid JSON format only. +- Return an array of exactly 10 objects. +- Each object must include 5 fields: + 1. title → Short, catchy and unique title: Not more than 3 words. + 2. one_line_description → A simple but very creative and unique one-liner description + 3. hook → The surprising or bold moment that makes the entire idea clicked to the business. + 4. usp → The unique selling proposition tied to the business + +Very Important Creative Guidelines: +- Each idea must be completely different in plot, theme, settings, tone, characters, events etc. +- No repeating characters, locations, or flow patterns. +- Use simple, clear, and engaging language. +- Each idea must end with a strong conclusion that ties back to the business details. +- The idea have to start from an unexpected or abstract scene/event before anchoring back to the business details. + +""" -def ideator_prompt(state): +def critic_prompt_v3(): + return f""" +You are a precision-focused, detail-oriented video ideas critic and ideas refiner. +You’ve been assigned to critique and refine 10 video ideas created by another ideator. +Your job is to identify weaknesses and then improve them making more creative, unique and tied to the business details. +I am working on a project where i have to give very unique, creative and feasible video ideas for tiktok and instagram reels to the nepali business houses by understanding their business details. 
+ +You are be provided with: +- Business details (from the human message): Focus more strongly on device_used_to_create_videos , challenges_faced and additional informations provided. +- Ideas generated by the ideator (from the human message) +- Influencer data (from the function message): You can take it as a reference if it helps you. + +Your Job: +1. Identify collective flaws across the 10 original ideas — e.g., uncreative, lacking uniqueness, repetition, weak hooks, confusing flow, poor tie-in to business, lack of diversity, or unclear USP. +2. Improve or rewrite each idea while keeping the structure intact in JSON format. + - title → Short, catchy title + - one_line_description → A simple but very creative and unique one-liner description + - hook → The surprising or bold moment that makes the entire idea clicked to the business. + - usp → The unique selling proposition tied to the business + +3. You are also allowed to change some entire ideas too if it lacks everything. But if some are already creative, you can refine them. +4. Give the response in simple and understandable vocabularies. + + +Very Important Creative Guidelines: +- Each idea must be completely different in plot, theme, setting, and tone, characters , events etc. +- Avoid repeating characters, hooks, settings, or storylines across the 10 ideas. +- Use simple, clear, and engaging language. +- Each idea must end with a strong, creative tie-in to the business details provided +- The idea have to start from an unexpected or abstract scene, character, or event, then subtly anchor it to the business in the conclusion. + +""" + +def judge_prompt(state): + return f""" +You are a **precision-focused creative judge** tasked with selecting the **4 best video ideas** from a set of 10. +Each idea must be analyzed and scored on specific creativity and business alignment metrics. + +--- + +### Scoring Rules: +- You must evaluate all 10 ideas against the metrics below. 
+- Use a 1–5 score scale for each metric: +- Metrics to score for every idea: +1. Originality: 1=Very common, 2=Slight twist, 3=Moderately unique, 4=Rare/innovative, 5=Completely new +2. Fluency: 1=One-off, 2=Few vars, 3=Some, 4=Many contexts, 5=Endless remix +3. Flexibility: 1=One niche, 2=Few creators, 3=Several niches, 4=Many categories, 5=Universal +4. Feasibility (phone-only): 1=Impossible, 2=Very hard, 3=Possible w/ effort, 4=Easy on phone, 5=Effortless +5. Practical Value: 1=None, 2=Low, 3=Some, 4=High, 5=Very high/viral +6. Surprise_factor: 1=Predictable, 2=Mild, 3=Moderate, 4=Strong, 5=Shocking +7. Combinatorial Novelty: 1=Copy, 2=Slight remix, 3=Familiar combo, 4=Creative blend, 5=Radical fusion +8. Simplicity: 1=Very complex, 2=Complicated, 3=Some effort, 4=Simple, 5=Instantly clear +9. Scalability: 1=One-time, 2=Few times, 3=Limited repeats, 4=Recurring, 5=Endless series +10. Cultural Freshness: 1=Outdated, 2=Stale, 3=Current common, 4=Fresh twist, 5=Trendsetting +11. Alignment_with_business_details +12. Total_score + +--- + +### Selection Rules: +1. After scoring all 10, calculate a **total score** for each idea. +2. Select the **top 4 ideas with the highest total scores**. Remember, don't change the idea. Just give the selected ideas as it is. You are not allowed to change the idea even a little bit. Just score them +3. If two ideas tie, prefer the one with better **Alignment with business details**. +4. Only return the **final 4 selected ideas** with their **scores included**. + +--- +The 10 ideas will be provided to you through HumanMessage. + +### Business Details: +{state.business_details[-1]} + + +--- +Give the output in json format. +""" + +def ideator_prompt_v2(state): return f""" You are a **precision-focused, culturally-attuned video ideator** trusted by top global brands to craft **crisp, reference-grounded, and high-impact video concepts** for social platforms like TikTok, Instagram, and YouTube. 
@@ -40,7 +144,7 @@ Do not draw from anything outside the provided IMDb ideas. Every idea must end in a subtle, creative tie-in to the business details. """ -def critic_prompt(state): +def critic_prompt_v2(state): return f""" You are a **precision-focused, detail-oriented video idea critic**. You’ve been assigned to **critique 4 video ideas** created by another ideator and then **refine or improve them strictly based on reference material provided**. @@ -99,7 +203,7 @@ Improved Ideas from Critic: """ -def ideator_prompt_old(state): +def ideator_prompt_v1(state): return f""" You are a **bold, imaginative, and culturally-attuned video ideator** trusted by top global brands to craft **crisp, original, and high-impact video concepts** for social platforms like TikTok, Instagram, and YouTube. @@ -160,11 +264,7 @@ This differs from a full story in that: Now, based on the business details above **and inspired by the 10 IMDb reference ideas**, generate **four original, 40-word promotional video ideas** that can inspire unforgettable video content. """ - - - - -def critic_prompt_old(state): +def critic_prompt_v1(state): return f""" You are a sharp, imaginative, and detail-oriented **video idea critic**. You’ve been assigned to **critique 4 video ideas** created by another ideator and then **refine or improve** them. If some **valid and usuable information** of image is provided to you by the user, you have to **strongly focus** in that information of image because that is the reference of what type of idea does the user wants. @@ -225,9 +325,6 @@ Improved Ideas from Critic: --- """ - - - def improver_prompt(state): disagreement_note = ( f"**Note:** The previous version was not validated because:\n{state.disagreement_reason[-1]}\n" @@ -307,9 +404,6 @@ Now begin your final analysis and output your improved 4 ideas in the required f """ - - - def validator_prompt(state): return f''' You are reviewing 4 short video ideas meant for a social media promotional campaign. 
@@ -323,7 +417,7 @@ Each idea creatively tells a short story that connects indirectly to the busines {state.business_details[-1]} **Final 4 Ideas from Improver**: -{state.formatted_response[-1]} +{state.top_4_ideas[-1]} --- diff --git a/src/genai/ideation_agent/utils/schemas.py b/src/genai/ideation_agent/utils/schemas.py new file mode 100644 index 0000000000000000000000000000000000000000..a3b5f4b6fc92a522c25efbc4aabf7684cb7531b5 --- /dev/null +++ b/src/genai/ideation_agent/utils/schemas.py @@ -0,0 +1,116 @@ +ideation_json_schema={ + "title": "IdeasSchema", + "type": "object", + "properties": { + "ideas": { + "type": "array", + "items": { + "title": "IdeaFormatter", + "type": "object", + "properties": { + "title": { + "type": "string", + "description": "The main title of the idea" + }, + "one_line_description": { + "type": "string", + "description": "A one liner description of the idea" + }, + "hook": { + "type": "string", + "description": "The attention-grabbing hook for the idea" + }, + "unique_selling_proposition": { + "type": "string", + "description": "The unique selling point that makes this idea stand out" + } + }, + "required": [ + "title", + "one_line_description", + "hook", + "unique_selling_proposition" + ] + } + } + }, + "required": ["ideas"] +} + + +judge_response_json_schema = { + "title": "JudgeResponseFormatter", + "type": "object", + "properties": { + "selected_ideas": { + "type": "array", + "items": { + "title": "SelectedIdea", + "type": "object", + "properties": { + "title": { + "type": "string", + "description": "The main title of the selected idea" + }, + "one_line_description": { + "type": "string", + "description": "A short one-line description of the idea" + }, + "hook": { + "type": "string", + "description": "The attention-grabbing hook" + }, + "storyline_flow": { + "type": "string", + "description": "The structured flow of the storyline" + }, + "unique_selling_proposition": { + "type": "string", + "description": "The unique selling 
proposition (USP) of the idea" + }, + "scores": { + "title": "Scores", + "type": "object", + "properties": { + "originality": { "type": "integer" }, + "feasibility": { "type": "integer" }, + "practical_value": { "type": "integer" }, + "flexibility": { "type": "integer" }, + "fluency": { "type": "integer" }, + "simplicity": { "type": "integer" }, + "combinatorial_novelty": { "type": "integer" }, + "culture_freshness": { "type": "integer" }, + "surprise_factor": { "type": "integer" }, + "scalability": { "type": "integer" }, + "alignment_with_business_details": { "type": "integer" }, + "total_score": { "type": "integer" } + }, + "required": [ + "originality", + "feasibility", + "practical_value", + "flexibility", + "fluency", + "simplicity", + "combinatorial_novelty", + "culture_freshness", + "surprise_factor", + "scalability", + "alignment_with_business_details", + "total_score" + ] + } + }, + "required": [ + "title", + "one_line_description", + "hook", + "storyline_flow", + "unique_selling_proposition", + "scores" + ] + } + } + }, + "required": ["selected_ideas"] +} diff --git a/src/genai/ideation_agent/utils/state.py b/src/genai/ideation_agent/utils/state.py index b9898e670d22a80d300424ea7c15278d8275d593..f61de439c5b7d7fbc4b1a4f629e9c75fe1556114 100644 --- a/src/genai/ideation_agent/utils/state.py +++ b/src/genai/ideation_agent/utils/state.py @@ -1,20 +1,19 @@ from pydantic import BaseModel, Field -from typing import Optional +from typing import Optional , List import operator from typing import Annotated class State(BaseModel): - business_details : Annotated[list[dict],operator.add] = [] - ideator_response: Annotated[list[str],operator.add] = [] - critic_response: Annotated[list[str],operator.add] = [] - ideator_fault: Annotated[list[str],operator.add] = [] - formatted_response: Annotated[list[str],operator.add] = [] - validator1_response: Annotated[list[str],operator.add] = [] - validator2_response: Annotated[list[str],operator.add] = [] - validator3_response: 
Annotated[list[str],operator.add] = [] - validator4_response: Annotated[list[str],operator.add] = [] - disagreement_reason: Annotated[list[str],operator.add] = [] - image_caption: Annotated[list[str],operator.add] = [] + business_details : Annotated[list[dict],operator.add] = [] + influencers_data : Annotated[list[str],operator.add] = [] + ideator_response: Annotated[list[str],operator.add] = [] + critic_response: Annotated[list[str],operator.add] = [] + judge1_response : Annotated[list[dict],operator.add] = [] + judge2_response : Annotated[list[dict],operator.add] = [] + top_4_ideas : Annotated[list[list],operator.add] = [] + validator_response: Annotated[list[str],operator.add] = [] + disagreement_reason: Annotated[list[str],operator.add] = [] + image_caption: Annotated[list[str],operator.add] = [] class QueryFormatter(BaseModel): business_details: str = Field(description="The details of the business that user passes to the agent") @@ -38,3 +37,5 @@ class CriticResponseFormatter(BaseModel): improved_idea4: str = Field(description="Returns the fourth improved idea of exactly 40 words. ") + + diff --git a/src/genai/ideation_agent/utils/tools.py b/src/genai/ideation_agent/utils/tools.py index 2ee18d7d12c2f22fe2b35c6c4e974282a8ace864..2d92f3546dcaa57ec7e791e8d764fd6c8c33683b 100644 --- a/src/genai/ideation_agent/utils/tools.py +++ b/src/genai/ideation_agent/utils/tools.py @@ -76,21 +76,5 @@ class Retrieval: -@tool("influencers_data_retrieval_tool", args_schema=QueryFormatter, return_direct=False,description="Retrieve influencer-related data for a given query.") -def retrieve_influencers_data_tool(business_details): - ''' - Always invoke this tool. - Retrieve influencer's data by semantic search of **business details**. 
- ''' - return Retrieval(business_details).influencers_data() - - -@tool("imdb_movies_ideas_retrieval_tool", args_schema=QueryFormatter, return_direct=False,description="Retrieve imdb movies-related idea for a given query.") -def retrieve_imdb_ideas_tool(business_details): - ''' - Always invoke this tool. - Retrieve the ideas of imdb_movies by semantic search of **business details**. - ''' - return Retrieval(business_details).imdb_ideas() diff --git a/src/genai/orchestration_agent/__pycache__/__init__.cpython-311.pyc b/src/genai/orchestration_agent/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9adb805f468b7b83994bbae5761edeb0bd4e2bd7 Binary files /dev/null and b/src/genai/orchestration_agent/__pycache__/__init__.cpython-311.pyc differ diff --git a/src/genai/orchestration_agent/__pycache__/agent.cpython-311.pyc b/src/genai/orchestration_agent/__pycache__/agent.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b68166b79c18bba9a59006bc88cc1c3b9af387d Binary files /dev/null and b/src/genai/orchestration_agent/__pycache__/agent.cpython-311.pyc differ diff --git a/src/genai/orchestration_agent/utils/__pycache__/__init__.cpython-311.pyc b/src/genai/orchestration_agent/utils/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..56163e45aa8fde70e26c8a517b2df048f15d10e7 Binary files /dev/null and b/src/genai/orchestration_agent/utils/__pycache__/__init__.cpython-311.pyc differ diff --git a/src/genai/orchestration_agent/utils/__pycache__/nodes.cpython-311.pyc b/src/genai/orchestration_agent/utils/__pycache__/nodes.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ea25843afc0a1979a2be65d746ea1e14acaa708 Binary files /dev/null and b/src/genai/orchestration_agent/utils/__pycache__/nodes.cpython-311.pyc differ diff --git a/src/genai/orchestration_agent/utils/__pycache__/prompts.cpython-311.pyc 
b/src/genai/orchestration_agent/utils/__pycache__/prompts.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..532396b57708e1453be70d70e4cdfbbfc4b624cd Binary files /dev/null and b/src/genai/orchestration_agent/utils/__pycache__/prompts.cpython-311.pyc differ diff --git a/src/genai/orchestration_agent/utils/__pycache__/state.cpython-311.pyc b/src/genai/orchestration_agent/utils/__pycache__/state.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..101f84e62490492a9c16f96598b50d94ea82ee63 Binary files /dev/null and b/src/genai/orchestration_agent/utils/__pycache__/state.cpython-311.pyc differ diff --git a/src/genai/orchestration_agent/utils/__pycache__/tools.cpython-311.pyc b/src/genai/orchestration_agent/utils/__pycache__/tools.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bbb87005430af775d02a3da39ae9d47357755a43 Binary files /dev/null and b/src/genai/orchestration_agent/utils/__pycache__/tools.cpython-311.pyc differ diff --git a/src/genai/orchestration_agent/utils/__pycache__/tools.cpython-312.pyc b/src/genai/orchestration_agent/utils/__pycache__/tools.cpython-312.pyc index 6f93c9849c90afe4426c808e3ad4e9e540991119..4f31da68281597930f11fd46ea6515fe886dc21c 100644 Binary files a/src/genai/orchestration_agent/utils/__pycache__/tools.cpython-312.pyc and b/src/genai/orchestration_agent/utils/__pycache__/tools.cpython-312.pyc differ diff --git a/src/genai/orchestration_agent/utils/__pycache__/utils.cpython-311.pyc b/src/genai/orchestration_agent/utils/__pycache__/utils.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe3e5996643c8dbc317f818bfe5f0944731d2f0b Binary files /dev/null and b/src/genai/orchestration_agent/utils/__pycache__/utils.cpython-311.pyc differ diff --git a/src/genai/orchestration_agent/utils/__pycache__/utils.cpython-312.pyc b/src/genai/orchestration_agent/utils/__pycache__/utils.cpython-312.pyc index 
549032bd090dc51de4cb31064070a73bb8004945..e58671a83f522132768935a7511addcd21ae1673 100644 Binary files a/src/genai/orchestration_agent/utils/__pycache__/utils.cpython-312.pyc and b/src/genai/orchestration_agent/utils/__pycache__/utils.cpython-312.pyc differ diff --git a/src/genai/utils/__pycache__/__init__.cpython-311.pyc b/src/genai/utils/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e17fe324d0068d19d49149912066a1c8d8e85dfa Binary files /dev/null and b/src/genai/utils/__pycache__/__init__.cpython-311.pyc differ diff --git a/src/genai/utils/__pycache__/data_loader.cpython-311.pyc b/src/genai/utils/__pycache__/data_loader.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c4071031268e555513aa27d44aacbb69f588d79d Binary files /dev/null and b/src/genai/utils/__pycache__/data_loader.cpython-311.pyc differ diff --git a/src/genai/utils/__pycache__/models_loader.cpython-311.pyc b/src/genai/utils/__pycache__/models_loader.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b89f23a4fde08d2a4e7e42f3deed1605fed1e343 Binary files /dev/null and b/src/genai/utils/__pycache__/models_loader.cpython-311.pyc differ diff --git a/src/genai/utils/__pycache__/models_loader.cpython-312.pyc b/src/genai/utils/__pycache__/models_loader.cpython-312.pyc index 500366551849358acce8caced8e72b96e24bfaed..30abead2c8b62b9332425773cbcce16243ef1657 100644 Binary files a/src/genai/utils/__pycache__/models_loader.cpython-312.pyc and b/src/genai/utils/__pycache__/models_loader.cpython-312.pyc differ diff --git a/src/genai/utils/__pycache__/utils.cpython-311.pyc b/src/genai/utils/__pycache__/utils.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8891b0ccc5beeb18c4fbfaba6de76339e76b103a Binary files /dev/null and b/src/genai/utils/__pycache__/utils.cpython-311.pyc differ diff --git a/src/genai/utils/models_loader.py 
b/src/genai/utils/models_loader.py index b4a2ace8f4650e874e44e86965c91390d292687c..6ae435f16ca9bdc55acaad1966c2ad5139f3a3c6 100644 --- a/src/genai/utils/models_loader.py +++ b/src/genai/utils/models_loader.py @@ -22,10 +22,12 @@ llm_gpt = ChatOpenAI(model="gpt-4o-mini",temperature=0.3) captioning_model = "meta-llama/llama-4-scout-17b-16e-instruct" image_generation_model = "black-forest-labs/FLUX.1-schnell" -improver_llm = llm_anthropic -ideator_llm = llm_anthropic -critic_llm = llm_anthropic -validator_llm = llm_anthropic +improver_llm = llm_gpt +ideator_llm = llm_gpt +critic_llm = llm_gpt +validator_llm = llm_gpt +judge1_llm = llm_gpt +judge2_llm = llm_gpt diff --git a/src/genai/utils/utils.py b/src/genai/utils/utils.py index 79634341ec8f6f17dab089f681c8a03372030dae..f2ffb1fd2e6942769a2716b1d4ffc0057463b4bd 100644 --- a/src/genai/utils/utils.py +++ b/src/genai/utils/utils.py @@ -7,28 +7,14 @@ def clean_text(text: str) -> str: if not isinstance(text, str): return "" - - # Normalize encoded newlines and tabs text = text.replace("\\n", "\n").replace("\\t", " ") - - # Remove stray backslashes (\\), unless part of newline text = re.sub(r"\\(?!n)", '', text) - - # Remove brackets often used for metadata or markup text = re.sub(r'[\[\]{}<>]', '', text) - - # Remove quotes text = re.sub(r"[\"']", '', text) - - # Remove special characters except basic punctuation (.,!?) text = re.sub(r"[^a-zA-Z0-9.,!? \n]", '', text) - - # Remove repeated punctuation like "!!!" or "???" text = re.sub(r'([!?.,]){2,}', r'\1', text) - - # Normalize multiple spaces and newlines text = re.sub(r'[ \t]+', ' ', text) - text = re.sub(r'\n{3,}', '\n\n', text) # Collapse more than 2 newlines to just 2 - text = re.sub(r' *\n *', '\n', text) # Clean spaces around newlines + text = re.sub(r'\n{3,}', '\n\n', text) + text = re.sub(r' *\n *', '\n', text) return text.strip() \ No newline at end of file