Spaces:
Sleeping
Sleeping
| import pandas as pd | |
| import ast | |
| from .state import State | |
| from .tools import StoryFormatter, BrainstromTopicFormatter | |
| from langchain_core.messages import SystemMessage | |
| from .models_loader import llm , ST | |
| from .data_loader import load_influencer_data | |
def retrieve(state: State) -> State:
    """Embed each topic in ``state.topic`` and fetch the nearest influencer story.

    For every topic, the sentence-transformer ``ST`` encodes the topic text and
    the dataset returns the single closest example (``k=1``).  The per-topic
    results — lists of ``{username: agentic_story}`` dicts — are collected and
    the whole batch is appended to ``state.retrievals``.

    Returns the mutated ``state``.
    """
    print('Moving to retrieval process')
    # Load the dataset once: it is loop-invariant, and the original reloaded it
    # on every iteration of the topic loop.
    data = load_influencer_data()
    retrievals = []
    for topic in state.topic:
        embedded_query = ST.encode(topic)  # dense vector for similarity search
        scores, retrieved_examples = data.get_nearest_examples(
            "embeddings", embedded_query, k=1
        )
        # One {username: story} dict per retrieved example (k=1 -> one entry).
        result = [
            {user: story}
            for user, story in zip(
                retrieved_examples['username'],
                retrieved_examples['agentic_story'],
            )
        ]
        retrievals.append(result)
    print('Retrieval process completed......')
    state.retrievals.append(retrievals)
    print('The retrieval is:\n', state.retrievals)
    return state
def generate_story(state: State) -> State:
    """Generate a storyline for ``state.topic``, using the latest retrieval as a format reference.

    Flattens the most recent retrieval batch into a single reference text,
    builds a prompt (constrained by the user's latest preferred topics when any
    exist), invokes the LLM bound to the ``StoryFormatter`` tool, and appends
    the parsed response to ``state.stories``.

    Returns the mutated ``state``.
    """
    print('The state retrieval is:', state.retrievals)
    retrieval_list = state.retrievals[-1]
    # Each item is a per-topic list of {username: story} dicts (k=1 retrieval);
    # collect the story texts from the last dict of each item.
    agentic_stories = []
    for item in retrieval_list:
        print('item:', item[-1].values())
        agentic_stories.extend(item[-1].values())
    retrieval = " ".join(agentic_stories)
    if len(state.preferred_topics) == 0:
        # No user preferences recorded yet: plain storyline generation.
        template = f'''I want to create a detailed storyline for a video in any domain. You have to provide me that storyline what to include in the video.
    Now, i am giving you the topic of the video. But the need is to generate the story focusing on the format that i'll provide to you.
    You can use this format for the reference purpose, not for the exact similar generation. The format is:\n{retrieval}.
    \n\n Now let's start creating the storyline for my topic. The topic of the video is: \n\n{state.topic}'''
    else:
        # Preferences exist: append them as a hard constraint on the storyline.
        template = f'''I want to create a detailed storyline for a video in the given topic. You have to provide me that storyline what to include in the video.
    Now, i am giving you the topic of the video. But the need is to generate the story focusing on the format that i'll provide to you.
    You can use this format for the reference purpose, not for the exact similar generation. The format is:\n{retrieval}.
    \n\n Now let's start creating the storyline for my topic. The topic of the video is: \n\n{state.topic}\n\n
    **Final Reminder** You have to strongly focus on these topics while creating the storyline: {state.preferred_topics[-1]}'''
    messages = [SystemMessage(content=template)]
    response = llm.bind_tools([StoryFormatter]).invoke(messages)
    print('The response is:', response)
    if hasattr(response, 'tool_calls') and response.tool_calls:
        # Structured output path: take the first tool call's arguments.
        response = response.tool_calls[0]['args']
    elif hasattr(response, 'content'):
        # Plain-text fallback when no tool call was produced.
        response = response.content
    else:
        response = "No response"
    state.stories.append(response)
    return state
def generate_brainstroming(state: State) -> State:
    """Brainstorm improvement ideas for the most recent storyline.

    Prompts the LLM (bound to the ``BrainstromTopicFormatter`` tool) for four
    diversification suggestions for ``state.stories[-1]`` and appends the
    parsed response to ``state.brainstroming_topics``.

    Returns the mutated ``state``.
    """
    story = state.stories[-1]
    template = f'''I want to brainstorm ways to diversify or improve a storyline in exactly 4 sentences.
    The goal is to generate creative and actionable ideas that are not on the storyline on how the storyline can be expanded or modified for better engagement.
    For example: If the storyline is about creating a promotional video for a restaurant, the new suggestions might include:
    - I want to showcase the chef preparing a signature dish.
    - I want to add a sequence of customers sharing their experiences at the restaurant.
    - I want to highlight the farm-to-table sourcing of ingredients with a short segment showing local farms.
    - I want to include a time-lapse of the restaurant transforming from day to night, capturing its unique ambiance.
    - I want to feature a quick interview with the owner sharing the story behind the restaurant.
    Now, I will provide you with the storyline. The storyline is:\n{story}'''
    messages = [SystemMessage(content=template)]
    response = llm.bind_tools([BrainstromTopicFormatter]).invoke(messages)
    print('The response is:', response)
    if hasattr(response, 'tool_calls') and response.tool_calls:
        # Structured output path: take the first tool call's arguments.
        response = response.tool_calls[0]['args']
    elif hasattr(response, 'content'):
        # Plain-text fallback when no tool call was produced.
        response = response.content
    else:
        response = "No response"
    state.brainstroming_topics.append(response)
    print('The brainstroming topics are:', state.brainstroming_topics)
    return state
def select_preferred_topics(state: State) -> State:
    """Interactively ask the user which brainstormed topics to keep.

    Shows the latest set of brainstormed topics, reads a comma-separated list
    of 1-based indices from stdin, and appends the chosen topic texts to
    ``state.preferred_topics``.  ``state.carry_on`` is set to ``True`` only
    when at least one valid topic was selected; on empty, non-numeric, or
    entirely out-of-range input it is set to ``False`` so the graph can stop
    looping.

    Returns the mutated ``state``.
    """
    print("---human_feedback---")
    topic_values = list(state.brainstroming_topics[-1].values())
    print("Available topics:")
    for idx, topic in enumerate(topic_values, 1):
        print(f"{idx}. {topic}")
    raw_input_str = input("Enter the numbers of your preferred topics (comma-separated), or press Enter to skip: ").strip()
    if not raw_input_str:
        state.carry_on = False
        print("No topics selected. Ending process.")
        return state
    try:
        preferred_indices = [int(i.strip()) for i in raw_input_str.split(",")]
    except ValueError:
        # Non-numeric input: abort the selection instead of crashing.
        state.carry_on = False
        print("Invalid input. Please try again.")
        return state
    # Keep only in-range selections (user input is 1-based).
    preferred_topics = [topic_values[i - 1] for i in preferred_indices if 0 < i <= len(topic_values)]
    if not preferred_topics:
        state.carry_on = False
        print("No valid topics selected. Ending process.")
        return state
    # Record the selection only after confirming it is non-empty.  The original
    # appended before validating, so an all-out-of-range selection left an
    # empty list polluting state.preferred_topics.
    state.preferred_topics.append(preferred_topics)
    print("You selected:")
    print(preferred_topics)
    state.carry_on = True
    return state
| # def select_preferred_topics(state: State) -> State: | |
| # print("---API_feedback_mode---") | |
| # if not state.brainstroming_topics: | |
| # print("No brainstormed topics found.") | |
| # state.carry_on = False | |
| # return state | |
| # # Get the latest set of brainstormed topics | |
| # topic_values = list(state.brainstroming_topics[-1].values()) | |
| # print(f"Available topics: {topic_values}") | |
| # # Ensure preferred_topics is well-formed | |
| # if state.preferred_topics and isinstance(state.preferred_topics[-1], list): | |
| # latest_selection = state.preferred_topics[-1] | |
| # if latest_selection: | |
| # print("User selected topics:") | |
| # print(latest_selection) | |
| # state.carry_on = True | |
| # return state | |
| # print("No preferred topics selected via API. Ending feedback loop.") | |
| # state.carry_on = False | |
| # return state | |
def generate_final_story(state:State)-> State:
    """Generate the final storyline, folding in every preferred-topic selection.

    Builds a prompt from the latest retrieval batch (used as a format
    reference), the original topic(s), and the flattened list of all
    preferred-topic selections, then invokes the LLM bound to the
    ``StoryFormatter`` tool.  The parsed response is stored in
    ``state.final_story`` and also appended to ``state.stories``.
    """
    # NOTE(review): unlike generate_story, this interpolates the raw retrieval
    # structure (a nested list of {username: story} dicts) into the prompt
    # rather than the joined story text — confirm this is intentional.
    template = f'''I want to create a detailed storyline for a video in the given topic. You have to provide me that storyline what to include in the video.
    Now, i am giving you the topic of the video. But the need is to generate the story focusing on the format that i'll provide to you.
    You can use this format for the reference purpose, not for the exact similar generation. The format is:\n{state.retrievals[-1]}.
    \n\n Now let's start creating the storyline for my topic. The topic of the video is: \n\n{state.topic}\n\n
    **Final Reminder** You have to strongly focus on these topics while creating the storyline: {[item for sublist in state.preferred_topics for item in sublist]}'''
    messages = [SystemMessage(content=template)]
    response = llm.bind_tools([StoryFormatter]).invoke(messages)
    print('The final response is:',response)
    if hasattr(response, 'tool_calls') and response.tool_calls:
        # Structured output path: take the first tool call's arguments.
        response = response.tool_calls[0]['args']
    elif hasattr(response, 'content'):
        # Plain-text fallback when no tool call was produced.
        response = response.content
    else:
        response = "No response"
    state.final_story=response
    state.stories.append(response)
    return state
def route_after_selection(state: "State"):
    """Return the state's carry_on flag so the graph can branch on it."""
    decision = state.carry_on
    print('The output is:', decision)
    return decision