from langchain_core.messages import SystemMessage, HumanMessage, FunctionMessage
from .tools import retrieve_tool
import base64
from PIL import Image
from io import BytesIO
from fastapi import UploadFile
from huggingface_hub import InferenceClient
from .prompts import story_to_prompt, final_story_prompt
import os
from langgraph.prebuilt import create_react_agent
import pandas as pd
from datasets import load_dataset
from src.genai.utils.models_loader import llm_gpt



def generate_final_story(final_state):
    """Stream the final story token by token, choosing a prompt strategy based
    on whether the caller supplied preferred topics."""
    if 'preferred_topics' in final_state:
        if len(final_state['preferred_topics']) > 0:
            template = final_story_prompt(final_state)
            messages = [
                SystemMessage(content=template),
                HumanMessage(content=f"The idea of the video is:\n{final_state['idea']}\n"),
                FunctionMessage(
                    content=(
                        f"The business details are:\n{final_state['business_details']}\n"
                        f"The influencer data is:\n{final_state['retrievals'][-1]}"
                    ),
                    name='final_story_tool',
                ),
            ]
            print('The messages for the final story:', messages)

            react_agent = create_react_agent(model=llm_gpt, tools=[])

            for message_chunk, metadata in react_agent.stream({'messages': messages}, stream_mode='messages'):
                yield message_chunk.content
        else:
            # No preferred topics: replay the most recently generated story.
            for chunk in final_state['stories'][-1]:
                yield chunk
    else:
        # No topic information at all: retrieve influencer data on the fly.
        template = final_story_prompt(final_state)
        influencers_data = retrieve_tool(final_state)
        messages = [
            SystemMessage(content=template),
            FunctionMessage(
                content=(
                    f"The business details are:\n{str(final_state)}\n"
                    f"The influencer data is:\n{influencers_data}"
                ),
                name='final_story_tool',
            ),
        ]
        react_agent = create_react_agent(model=llm_gpt, tools=[])

        for message_chunk, metadata in react_agent.stream({'messages': messages}, stream_mode='messages'):
            yield message_chunk.content


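# Hedged usage sketch (not called anywhere in this module): shows how a caller
# might consume the streaming generator above. The final_state keys mirror the
# ones read inside generate_final_story; the values are hypothetical.
def _demo_stream_final_story():
    final_state = {
        'preferred_topics': ['fitness'],
        'idea': 'a 30-second gym transformation reel',
        'business_details': {'industry': 'fitness'},
        'retrievals': ['<influencer rows retrieved earlier>'],
    }
    for chunk in generate_final_story(final_state):
        print(chunk, end='', flush=True)
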
def encode_image_to_base64(uploaded_file: UploadFile) -> str:
    """Read a FastAPI UploadFile and return its bytes as a base64 string."""
    return base64.b64encode(uploaded_file.file.read()).decode("utf-8")

def process_image(base64_str: str) -> Image.Image:
    """Convert a base64 string back to a PIL image (optional for LangGraph processing)."""
    image_data = base64.b64decode(base64_str)
    return Image.open(BytesIO(image_data))


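# Hedged usage sketch: shows how a base64 payload (e.g. one produced by
# encode_image_to_base64) can be decoded back into a PIL image. The file
# path is hypothetical and used purely for illustration.
def _demo_image_roundtrip(path: str = 'example.png') -> Image.Image:
    with open(path, 'rb') as f:
        b64 = base64.b64encode(f.read()).decode('utf-8')
    return process_image(b64)
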
def generate_prompt(final_story, business_details, refined_ideation):
    """Turn the final story plus business context into a text-to-image prompt."""
    print('************Entering prompt generator****************')
    messages = [
        SystemMessage(content=story_to_prompt()),
        HumanMessage(content=f'The scene-by-scene video story is:\n{final_story}'),
        FunctionMessage(
            content=f'The business details are:\n{business_details}\nThe idea is:\n{refined_ideation}',
            name='prompt_generation_id',
        ),
    ]

    prompt = llm_gpt.invoke(messages)
    print('The prompt is:', prompt)
    return prompt.content

def generate_image(final_story, business_details, refined_ideation):
    """Generate a cover image for the story and return it as a base64 PNG string."""
    prompt = generate_prompt(final_story, business_details, refined_ideation)
    print('************Finished prompt generator****************')

    client = InferenceClient(
        provider="hf-inference",
        api_key=os.environ.get('HUGGINGFACEHUB_ACCESS_TOKEN'),
    )
    print('************Calling image generator****************')

    # text_to_image returns a PIL.Image object
    image = client.text_to_image(
        prompt,
        model="black-forest-labs/FLUX.1-schnell",
    )
    print('*****************Image Created*******************')

    # Serialize the image into an in-memory PNG buffer
    buffered = BytesIO()
    image.save(buffered, format="PNG")  # "JPEG" also works if preferred
    buffered.seek(0)

    # Encode the buffer to a base64 string for transport
    img_base64 = base64.b64encode(buffered.read()).decode("utf-8")
    print('*****************Image Encoded to Base64*******************')

    return img_base64


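# Hedged usage sketch: requires a valid HUGGINGFACEHUB_ACCESS_TOKEN in the
# environment; the argument values below are hypothetical placeholders.
def _demo_generate_image():
    b64_png = generate_image(
        final_story='Scene 1: ... Scene 2: ...',
        business_details={'industry': 'fashion'},
        refined_ideation='a playful product teaser',
    )
    print(b64_png[:64], '...')
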
def save_to_db(business_details):
    """Filter the influencer dataset against the business details and persist
    the matching rows to a local CSV file (despite the name, no database is
    involved)."""
    # 1. Load the influencer dataset
    dataset = load_dataset("subashdvorak/tiktok-agentic-story")['train']
    df = pd.DataFrame(dataset)

    # 2. Flatten all business-detail values into a set of lowercase strings
    all_values = set()
    for v in business_details.values():
        if isinstance(v, str):
            all_values.add(v.lower())
        elif isinstance(v, list):
            all_values.update(map(str.lower, map(str, v)))

    # 3. Match rows where ANY column contains ANY of the values
    def row_matches(row):
        return any(
            val in str(cell).lower()
            for cell in row
            for val in all_values
        )

    # 4. Apply the row-wise match and save the result
    matched_df = df[df.apply(row_matches, axis=1)]
    matched_df.to_csv('extracted_data.csv')
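
# Hedged usage sketch: the dict below is illustrative only. save_to_db accepts
# any mapping whose string or list-of-string values are matched,
# case-insensitively, against every cell of the dataset.
def _demo_save_to_db():
    save_to_db({
        'industry': 'fashion',
        'keywords': ['skincare', 'street style'],
    })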