import base64
import os
from io import BytesIO

from fastapi import UploadFile
from huggingface_hub import InferenceClient
from langchain_core.messages import SystemMessage
from PIL import Image

from .models_loader import llm
from .prompts import story_to_prompt
from .tools import StoryFormatter
def generate_final_story(query):
    """Generate a detailed video storyline, steered by the user's preferred topics."""
    if len(query['preferred_topics']) > 0:
        # Flatten the nested list of preferred topics into a single list.
        preferred_topics = [item for sublist in query['preferred_topics'] for item in sublist]
        template = f'''I want to create a detailed storyline for a video on the given topic. You have to provide me that storyline, describing what to include in the video.
Now, I am giving you the topic of the video, but you need to generate the story following the format that I'll provide.
You can use this format for reference purposes, not for exact replication. The format is:\n{query['retrievals'][-1]}.
\n\nNow let's start creating the storyline for my topic. The topic of the video is:\n\n{query['topic']}\n\n
**Final Reminder** You have to focus strongly on these topics while creating the storyline: {preferred_topics}'''
        messages = [SystemMessage(content=template)]
        response = llm.bind_tools([StoryFormatter]).invoke(messages)
        print('The final response is:', response)
        # Prefer structured tool-call output; fall back to raw text content.
        if hasattr(response, 'tool_calls') and response.tool_calls:
            response = response.tool_calls[0]['args']
        elif hasattr(response, 'content'):
            response = response.content
        else:
            response = "No response"
        return response
    else:
        # No preferred topics: return the most recently generated story instead.
        return query['stories'][-1]
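
# Example call (a sketch only -- the exact keys of `query` are assumed from the
# usage above; the real state dict is built elsewhere in the pipeline):
#
#   query = {
#       'preferred_topics': [['space travel', 'rockets']],
#       'retrievals': ['<reference storyline format text>'],
#       'topic': 'The history of Mars exploration',
#       'stories': [],
#   }
#   story = generate_final_story(query)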
def encode_image_to_base64(uploaded_file: UploadFile) -> str:
    """Read an uploaded file and return its contents as a base64-encoded string."""
    return base64.b64encode(uploaded_file.file.read()).decode("utf-8")


# Convert a base64 string back to a PIL image (optional, for LangGraph processing).
def process_image(base64_str: str) -> Image.Image:
    image_data = base64.b64decode(base64_str)
    return Image.open(BytesIO(image_data))
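
# Round-trip sketch (the names below are illustrative, not part of this module;
# `uploaded_file` is assumed to come from a FastAPI endpoint):
#
#   b64 = encode_image_to_base64(uploaded_file)   # UploadFile -> base64 str
#   img = process_image(b64)                      # base64 str -> PIL.Image
#   print(img.size)                               # e.g. (1024, 768)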
def generate_prompt(final_story):
    print('************Entering prompt generator****************')
    messages = [
        ("system", story_to_prompt),
        ("human", final_story),
    ]
    prompt = llm.invoke(messages)
    print('The prompt is:', prompt)
    return prompt.content
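
# Note: `llm.invoke` is given LangChain's (role, content) tuple shorthand here,
# which chat models accept as an equivalent of message objects. A minimal
# equivalent sketch:
#
#   from langchain_core.messages import HumanMessage
#   prompt = llm.invoke([SystemMessage(content=story_to_prompt),
#                        HumanMessage(content=final_story)])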
def generate_image(final_story):
    prompt = generate_prompt(final_story)
    print('************Finished prompt generator****************')
    client = InferenceClient(
        provider="hf-inference",
        # Read the API token from the environment rather than hard-coding a
        # secret in source control.
        api_key=os.getenv("HF_TOKEN"),
    )
    print('************Finished calling generator****************')
    # text_to_image returns a PIL.Image object.
    image = client.text_to_image(
        prompt,
        model="black-forest-labs/FLUX.1-schnell",
    )
    print('*****************Image Created*******************')
    image.save('image.png')
    print('*****************Image Saved*******************')
    return "Image Created"
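
# Manual smoke test (a sketch only: assumes HF_TOKEN is set in the environment
# and that the module is run as part of its package, e.g. `python -m pkg.module`,
# so the relative imports above resolve):
if __name__ == "__main__":
    demo_story = "A short documentary storyline about deep-sea exploration."
    print(generate_image(demo_story))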