import pandas as pd
import ast
from .state import State
from .tools import StoryFormatter, BrainstromTopicFormatter
from langchain_core.messages import SystemMessage
from .models_loader import llm , ST
from .data_loader import load_influencer_data
def retrieve(state: State) -> State:
    """Embed each topic in ``state.topic`` and fetch the single nearest
    example story for it from the influencer dataset.

    For every topic, builds a list of ``{username: agentic_story}`` dicts
    (k=1, so one dict per topic) and appends the whole per-run list to
    ``state.retrievals`` — downstream nodes read the latest run via
    ``state.retrievals[-1]``.
    """
    print('Moving to retrieval process')
    # Load the indexed dataset once, not once per topic as before.
    data = load_influencer_data()
    retrievals = []
    for topic in state.topic:  # assumes state.topic is an iterable of topic strings — TODO confirm
        embedded_query = ST.encode(topic)  # sentence-transformer embedding of the topic
        # k=1: keep only the closest matching example for this topic.
        scores, retrieved_examples = data.get_nearest_examples("embeddings", embedded_query, k=1)
        result = [{user: story} for user, story in zip(retrieved_examples['username'], retrieved_examples['agentic_story'])]
        retrievals.append(result)
    print('Retrieval process completed......')
    state.retrievals.append(retrievals)
    print('The retrieval is:\n', state.retrievals)
    return state
def generate_story(state: State) -> State:
    """Generate a storyline for ``state.topic`` with the LLM, using the
    most recent retrieval as a format reference.

    When the user has already picked preferred topics, the prompt also asks
    the model to focus on the latest selection. The parsed result (tool-call
    args when present, otherwise raw content) is appended to
    ``state.stories``.
    """
    topic = state.topic
    print('The state retrieval is:', state.retrievals)
    # Latest retrieval run: a list of per-topic result lists (see retrieve()).
    retrieval_list = state.retrievals[-1]
    agentic_stories = []
    for item in retrieval_list:
        print('item:', item[-1].values())
        agentic_stories.extend(item[-1].values())  # collect every retrieved story text
    retrieval = " ".join(agentic_stories)
    if len(state.preferred_topics) == 0:
        # First pass: no user preferences yet.
        # (Fixed prompt typo: "Th format is" -> "The format is".)
        template = f'''I want to create a detailed storyline for a video in any domain. You have to provide me that storyline what to include in the video.
Now, i am giving you the topic of the video. But the need is to generate the story focusing on the format that i'll provide to you.
You can use this format for the reference purpose, not for the exact similar generation. The format is:\n{retrieval}.
\n\n Now let's start creating the storyline for my topic. The topic of the video is: \n\n{state.topic}'''
    else:
        # Feedback pass: steer the story toward the latest user selection.
        template = f'''I want to create a detailed storyline for a video in the given topic. You have to provide me that storyline what to include in the video.
Now, i am giving you the topic of the video. But the need is to generate the story focusing on the format that i'll provide to you.
You can use this format for the reference purpose, not for the exact similar generation. The format is:\n{retrieval}.
\n\n Now let's start creating the storyline for my topic. The topic of the video is: \n\n{state.topic}\n\n
**Final Reminder** You have to strongly focus on these topics while creating the storyline: {state.preferred_topics[-1]}'''
    messages = [SystemMessage(content=template)]
    response = llm.bind_tools([StoryFormatter]).invoke(messages)
    print('The response is:', response)
    # Prefer structured tool-call output; fall back to raw text content.
    if hasattr(response, 'tool_calls') and response.tool_calls:
        response = response.tool_calls[0]['args']
    elif hasattr(response, 'content'):
        response = response.content
    else:
        response = "No response"
    state.stories.append(response)
    return state
def generate_brainstroming(state: State) -> State:
    """Ask the LLM for brainstorming ideas that diversify the most recent
    storyline, and append the parsed result to ``state.brainstroming_topics``.
    """
    story = state.stories[-1]
    prompt = f'''I want to brainstorm ways to diversify or improve a storyline in exactly 4 sentences.
The goal is to generate creative and actionable ideas that are not on the storyline on how the storyline can be expanded or modified for better engagement.
For example: If the storyline is about creating a promotional video for a restaurant, the new suggestions might include:
- I want to showcase the chef preparing a signature dish.
- I want to add a sequence of customers sharing their experiences at the restaurant.
- I want to highlight the farm-to-table sourcing of ingredients with a short segment showing local farms.
- I want to include a time-lapse of the restaurant transforming from day to night, capturing its unique ambiance.
- I want to feature a quick interview with the owner sharing the story behind the restaurant.
Now, I will provide you with the storyline. The storyline is:\n{story}'''
    llm_output = llm.bind_tools([BrainstromTopicFormatter]).invoke([SystemMessage(content=prompt)])
    print('The response is:', llm_output)
    # Prefer structured tool-call arguments; otherwise fall back to plain content.
    tool_calls = getattr(llm_output, 'tool_calls', None)
    if tool_calls:
        parsed = tool_calls[0]['args']
    elif hasattr(llm_output, 'content'):
        parsed = llm_output.content
    else:
        parsed = "No response"
    state.brainstroming_topics.append(parsed)
    print('The brainstroming topics are:', state.brainstroming_topics)
    return state
def select_preferred_topics(state: State) -> State:
    """Interactively ask the user to pick brainstormed topics by number.

    On a non-empty, valid selection: appends the chosen topic texts to
    ``state.preferred_topics`` and sets ``state.carry_on = True``. On an
    empty/invalid/out-of-range selection: sets ``carry_on = False`` without
    mutating ``preferred_topics``, so the graph can stop looping.
    """
    print("---human_feedback---")
    topic_values = list(state.brainstroming_topics[-1].values())
    print("Available topics:")
    for idx, topic in enumerate(topic_values, 1):
        print(f"{idx}. {topic}")
    raw_input_str = input("Enter the numbers of your preferred topics (comma-separated), or press Enter to skip: ").strip()
    if not raw_input_str:
        state.carry_on = False
        print("No topics selected. Ending process.")
        return state
    try:
        preferred_indices = [int(i.strip()) for i in raw_input_str.split(",")]
    except ValueError:  # non-numeric entry; only int() can raise here
        state.carry_on = False
        print("Invalid input. Please try again.")
        return state
    # Keep only in-range (1-based) selections.
    preferred_topics = [topic_values[i - 1] for i in preferred_indices if 0 < i <= len(topic_values)]
    if not preferred_topics:
        state.carry_on = False
        print("No valid topics selected. Ending process.")
        return state
    # Record the selection only once we know it is non-empty (previously an
    # empty list could be appended before the validity check).
    state.preferred_topics.append(preferred_topics)
    print("You selected:")
    print(preferred_topics)
    state.carry_on = True
    return state
# def select_preferred_topics(state: State) -> State:
# print("---API_feedback_mode---")
# if not state.brainstroming_topics:
# print("No brainstormed topics found.")
# state.carry_on = False
# return state
# # Get the latest set of brainstormed topics
# topic_values = list(state.brainstroming_topics[-1].values())
# print(f"Available topics: {topic_values}")
# # Ensure preferred_topics is well-formed
# if state.preferred_topics and isinstance(state.preferred_topics[-1], list):
# latest_selection = state.preferred_topics[-1]
# if latest_selection:
# print("User selected topics:")
# print(latest_selection)
# state.carry_on = True
# return state
# print("No preferred topics selected via API. Ending feedback loop.")
# state.carry_on = False
# return state
def generate_final_story(state: State) -> State:
    """Produce the final storyline: same prompt shape as ``generate_story``,
    but focused on every topic the user selected across all feedback rounds.
    Stores the parsed result in ``state.final_story`` and ``state.stories``.
    """
    # Flatten all rounds of user-selected topics into a single list.
    focus_topics = [item for sublist in state.preferred_topics for item in sublist]
    reference_format = state.retrievals[-1]
    prompt = f'''I want to create a detailed storyline for a video in the given topic. You have to provide me that storyline what to include in the video.
Now, i am giving you the topic of the video. But the need is to generate the story focusing on the format that i'll provide to you.
You can use this format for the reference purpose, not for the exact similar generation. The format is:\n{reference_format}.
\n\n Now let's start creating the storyline for my topic. The topic of the video is: \n\n{state.topic}\n\n
**Final Reminder** You have to strongly focus on these topics while creating the storyline: {focus_topics}'''
    llm_output = llm.bind_tools([StoryFormatter]).invoke([SystemMessage(content=prompt)])
    print('The final response is:', llm_output)
    # Prefer structured tool-call arguments; otherwise fall back to plain content.
    tool_calls = getattr(llm_output, 'tool_calls', None)
    if tool_calls:
        parsed = tool_calls[0]['args']
    elif hasattr(llm_output, 'content'):
        parsed = llm_output.content
    else:
        parsed = "No response"
    state.final_story = parsed
    state.stories.append(parsed)
    return state
def route_after_selection(state: State):
    """Conditional-edge router: return the loop/stop flag set by the
    feedback step (``state.carry_on``)."""
    decision = state.carry_on
    print('The output is:', decision)
    return decision