subashpoudel committed
Commit 946d35b · 1 Parent(s): a9f99c3

Completed response rendering between endpoints
.gitignore CHANGED
@@ -1,4 +1,5 @@
 myenv
 .env
 static
-templates
+templates
+main_demo.py
__pycache__/main.cpython-312.pyc CHANGED
Binary files a/__pycache__/main.cpython-312.pyc and b/__pycache__/main.cpython-312.pyc differ
 
__pycache__/main_demo.cpython-312.pyc ADDED
Binary file (1.29 kB).
 
main.py CHANGED
@@ -5,89 +5,103 @@ from my_agent.agent import build_graph
 import pandas as pd
 from typing import Optional , List
 from my_agent.utils.initial_interaction import BusinessInteractionChatbot
-import base64
-from PIL import Image
-from io import BytesIO
+from my_agent.utils.utils import encode_image_to_base64 , generate_final_story
+
 import json
+from my_agent.utils.nodes import generate_final_story
+from collections import defaultdict
+
+# Store brainstorming results per thread_id
 
 app = FastAPI()
 interaction_chatbot = BusinessInteractionChatbot()
 graph = build_graph()
 
 
-
-
+stored_data={}
 
 class UserMessage(BaseModel):
     message: str
-details_for_brainstrom = {}
 @app.post("/business-interaction")
 def business_chat(msg: UserMessage):
-    global details_for_brainstrom
     response = interaction_chatbot.chat(msg.message)
     if interaction_chatbot.is_complete(response):
         details = interaction_chatbot.extract_details()
-        details_for_brainstrom = details
+        stored_data['business_details'] = details
         return {"response": response, "business_details": details, "complete": True}
     return {"response": response, "complete": False}
 
 
 
-# class RequestInput(BaseModel):
-#     query: list
-#     preferred_topics: Optional[list] = []
-#     images: Optional[list[str]] = []  # base64-encoded image strings
-
-# @app.post("/brainstrom")
-# def run_graph(input_data: RequestInput):
-#     image_objects = []
-#     for img_b64 in input_data.images:
-#         image_objects.append(process_image(img_b64))  # decode and load images
-
-#     result = graph.invoke({
-#         'topic': input_data.query,
-#         'images': image_objects,
-#         'business_details': details_for_brainstrom
-#     })
-
-#     return {
-#         'final_story': result['final_story'],
-#         'business_details': result['business_details'],
-#     }
 
 
 
-
-# Convert uploaded image to base64 string
-def encode_image_to_base64(uploaded_file: UploadFile) -> str:
-    return base64.b64encode(uploaded_file.file.read()).decode("utf-8")
-
-# Convert base64 string to PIL image (optional for LangGraph processing)
-def process_image(base64_str: str) -> Image.Image:
-    image_data = base64.b64decode(base64_str)
-    return Image.open(BytesIO(image_data))
-
 @app.post("/brainstrom")
-async def run_graph(
+def run_graph(
     query: List[str],  # sent as JSON body
     preferred_topics: Optional[list] = [],
     images: Optional[List[UploadFile]] = [],  # ✅ Optional UploadFile list
-    thread_id: Optional[str] = "default-session"
+    thread_id: Optional[str] = "default-session",
 ):
     # Convert uploaded images to base64
     image_base64_list = [encode_image_to_base64(img) for img in images]
 
-    # Convert base64 to image objects (if LangGraph expects PIL.Image)
-    image_objects = [process_image(img_b64) for img_b64 in image_base64_list]
-
     # Invoke LangGraph
     result = graph.invoke({
         'topic': query,
         'images': image_base64_list,
-        'latest_preferred_topics':preferred_topics
+        'latest_preferred_topics':preferred_topics,
+        'business_details': stored_data['business_details']
     },
     config={"configurable": {"thread_id": thread_id}})
+    stored_data['brainstroming_response']=result
+    # brainstorm_store[thread_id] = result
 
     return {
         'response': result,
     }
+
+
+@app.post("/generate-final-story")
+def generate_final_story_endpoint(
+):
+    final_story = generate_final_story(stored_data["brainstroming_response"])
+    return {
+        'response': final_story
+    }
+
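A quick client-side sketch of the flow this commit wires up (the base URL, payloads, and parameter encoding are assumptions, not part of the commit). With no files attached, FastAPI embeds the two list parameters in a single JSON object and thread_id rides as a query parameter; uploading images would need multipart handling that this sketch skips:

import requests

BASE = "http://localhost:8000"  # assumed local dev server

# 1. Chat until the bot reports complete=True; the server then caches
#    the extracted details in stored_data['business_details']
r = requests.post(f"{BASE}/business-interaction",
                  json={"message": "We sell handmade candles"}).json()

# 2. Brainstorm; the server caches the graph output in
#    stored_data['brainstroming_response']
r = requests.post(f"{BASE}/brainstrom",
                  params={"thread_id": "demo"},
                  json={"query": ["an ad for eco-friendly candles"],
                        "preferred_topics": []}).json()

# 3. Render the final story from the cached brainstorming response
print(requests.post(f"{BASE}/generate-final-story").json()["response"])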
my_agent/utils/__pycache__/nodes.cpython-312.pyc CHANGED
Binary files a/my_agent/utils/__pycache__/nodes.cpython-312.pyc and b/my_agent/utils/__pycache__/nodes.cpython-312.pyc differ
 
my_agent/utils/__pycache__/utils.cpython-312.pyc ADDED
Binary file (2.92 kB).
 
my_agent/utils/nodes.py CHANGED
@@ -193,14 +193,14 @@ def route_after_selection(state:State):
     elif len(state.latest_preferred_topics)>0:
         return True
 
-def generate_final_story(state:State)-> State:
-    if len(state.preferred_topics)>0:
+def generate_final_story(query):
+    if len(query['preferred_topics'])>0:
         template = f'''I want to create a detailed storyline for a video in the given topic. You have to provide me that storyline what to include in the video.
         Now, i am giving you the topic of the video. But the need is to generate the story focusing on the format that i'll provide to you.
-        You can use this format for the reference purpose, not for the exact similar generation. The format is:\n{state.retrievals[-1]}.
-        \n\n Now let's start creating the storyline for my topic. The topic of the video is: \n\n{state.topic}\n\n
+        You can use this format for the reference purpose, not for the exact similar generation. The format is:\n{query['retrievals'][-1]}.
+        \n\n Now let's start creating the storyline for my topic. The topic of the video is: \n\n{query['topic']}\n\n
 
-        **Final Reminder** You have to strongly focus on these topics while creating the storyline: {[item for sublist in state.preferred_topics for item in sublist]}'''
+        **Final Reminder** You have to strongly focus on these topics while creating the storyline: {[item for sublist in query['preferred_topics'] for item in sublist]}'''
         messages = [SystemMessage(content=template)]
         response = llm.bind_tools([StoryFormatter]).invoke(messages)
         print('The final response is:',response)
@@ -210,13 +210,12 @@ def generate_final_story(state:State)-> State:
             response = response.content
         else:
             response = "No response"
-        state.final_story.append(response)
-        state.stories.append(response)
-        return state
+        # state.final_story.append(response)
+        # state.stories.append(response)
+        return response
 
-    state.final_story.append(state.stories[-1])
-    state.latest_preferred_topics=[]
-    return state
+    else:
+        return query['stories'][-1]
 
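With this change generate_final_story takes a plain dict instead of the graph State. A sketch of the shape it now expects, inferred from the keys the function reads (all values illustrative):

payload = {
    'topic': ['an ad for eco-friendly candles'],        # forwarded into the prompt
    'retrievals': ['<reference story format>'],         # last element is used as the format
    'preferred_topics': [['pricing', 'eco-friendly']],  # list of lists, flattened in the prompt
    'stories': ['<earlier draft story>'],               # fallback when preferred_topics is empty
}
final_story = generate_final_story(payload)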
my_agent/utils/utils.py ADDED
@@ -0,0 +1,42 @@
+
+from langchain_core.messages import SystemMessage
+from .tools import StoryFormatter
+from .models_loader import llm
+import base64
+from PIL import Image
+from io import BytesIO
+from fastapi import UploadFile
+def generate_final_story(query):
+    if len(query['preferred_topics'])>0:
+        template = f'''I want to create a detailed storyline for a video in the given topic. You have to provide me that storyline what to include in the video.
+        Now, i am giving you the topic of the video. But the need is to generate the story focusing on the format that i'll provide to you.
+        You can use this format for the reference purpose, not for the exact similar generation. The format is:\n{query['retrievals'][-1]}.
+        \n\n Now let's start creating the storyline for my topic. The topic of the video is: \n\n{query['topic']}\n\n
+
+        **Final Reminder** You have to strongly focus on these topics while creating the storyline: {[item for sublist in query['preferred_topics'] for item in sublist]}'''
+        messages = [SystemMessage(content=template)]
+        response = llm.bind_tools([StoryFormatter]).invoke(messages)
+        print('The final response is:',response)
+        if hasattr(response, 'tool_calls') and response.tool_calls:
+            response = response.tool_calls[0]['args']
+        elif hasattr(response, 'content'):
+            response = response.content
+        else:
+            response = "No response"
+        # state.final_story.append(response)
+        # state.stories.append(response)
+        return response
+
+    else:
+        return query['stories'][-1]
+
+
+
+def encode_image_to_base64(uploaded_file: UploadFile) -> str:
+    return base64.b64encode(uploaded_file.file.read()).decode("utf-8")
+
+
+# Convert base64 string to PIL image (optional for LangGraph processing)
+def process_image(base64_str: str) -> Image.Image:
+    image_data = base64.b64decode(base64_str)
+    return Image.open(BytesIO(image_data))
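A round-trip check for the two image helpers added here (a minimal sketch; the sample file name is hypothetical, and UploadFile's constructor keywords vary slightly across Starlette versions):

from fastapi import UploadFile
from my_agent.utils.utils import encode_image_to_base64, process_image

with open("sample.png", "rb") as f:                # hypothetical test image
    upload = UploadFile(file=f, filename="sample.png")
    b64 = encode_image_to_base64(upload)           # raw bytes -> base64 string

img = process_image(b64)                           # base64 string -> PIL.Image
print(img.size)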