File size: 23,679 Bytes
cc69e31
 
 
 
5a95d6e
cc69e31
 
5a95d6e
6bbc0db
64f7ed4
cc69e31
5a95d6e
cc37061
 
6bbc0db
4c122ae
 
 
cc69e31
4fdda22
 
3b99bdc
4fdda22
5a95d6e
6bbc0db
cc37061
861c9a4
b6bda85
5ee56ab
6bbc0db
 
 
 
cc37061
6bbc0db
cc69e31
 
5a95d6e
cc69e31
 
 
 
5a95d6e
cc69e31
 
 
 
 
cc37061
66a8a16
b2f2305
780190b
 
 
 
 
 
 
 
b2f2305
 
4c122ae
 
 
 
 
 
 
 
 
 
9e3fc63
4c122ae
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3b91b00
4c122ae
 
 
 
 
 
 
4fdda22
780190b
 
 
 
 
 
 
 
 
 
 
5ee56ab
76a6991
5ee56ab
780190b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6bbc0db
 
cd6c2a8
 
 
 
780190b
 
 
 
 
 
 
 
 
b6bda85
780190b
b6bda85
780190b
 
 
 
 
cd6c2a8
 
cc69e31
780190b
 
 
 
 
b6bda85
780190b
 
 
 
 
cc37061
 
 
 
 
 
 
 
 
3c933ba
cc37061
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
59ddc69
cc37061
b109625
 
cc37061
 
 
 
 
 
b109625
59ddc69
cc37061
 
b0eb57b
 
 
 
 
 
 
6d9a8f6
b0eb57b
59ddc69
b0eb57b
 
 
 
 
 
6d9a8f6
b0eb57b
 
 
 
 
cc37061
 
 
 
 
 
 
 
 
 
 
 
 
 
861c9a4
 
 
cc69e31
5a95d6e
 
 
b0eb57b
6bbc0db
4fdda22
4c122ae
cd6c2a8
861c9a4
 
 
cc69e31
 
 
 
 
5a95d6e
cc69e31
cc37061
 
cc69e31
 
861c9a4
 
 
 
 
 
 
 
 
 
 
 
 
cd6c2a8
 
 
 
 
300cd63
b6bda85
cd6c2a8
 
 
 
 
 
 
 
 
4fdda22
 
 
b2f2305
4c122ae
 
cd6c2a8
 
b2f2305
 
7e83345
b2f2305
 
 
 
4c122ae
 
 
 
20ca06c
 
4c122ae
 
 
 
 
 
 
b2f2305
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d0debf7
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
import os
from langchain.schema import Document
from langchain_core.prompts import PromptTemplate
from langchain_openai import OpenAIEmbeddings, ChatOpenAI
from langchain_community.vectorstores import FAISS
from langchain_core.runnables import RunnablePassthrough
from langchain_core.output_parsers import StrOutputParser
from autogluon.tabular import TabularPredictor
import google.generativeai as genai
import datetime
import pandas as pd
from dotenv import load_dotenv
import joblib
import numpy as np
import time
import subprocess
import random
from typing import List

from playwright.sync_api import sync_playwright
from bs4 import BeautifulSoup
from tiktok_downloader import TikTokDownloader

# Load secrets from .env and configure the Gemini SDK with its API key.
load_dotenv()
genai.configure(api_key=os.getenv("GEMINI_API_KEY"))

# Model identifiers: Gemini for video understanding, OpenAI for chat chains.
MODEL_NAME = "gemini-2.0-flash-thinking-exp-01-21"
OPENAI_MODEL = "gpt-4o-mini"

# Per-1K-token pricing (USD) used to estimate Gemini request cost in
# video_understanding(). Flash vs Pro tier is selected from MODEL_NAME.
GEMINI_FLASH_INPUT_TOKEN_COST_PER_1K = 0.000075
GEMINI_FLASH_OUTPUT_TOKEN_COST_PER_1K = 0.0003
GEMINI_PRO_INPUT_TOKEN_COST_PER_1K = 0.00125
GEMINI_PRO_OUTPUT_TOKEN_COST_PER_1K = 0.005

def row_to_document(row):
    """Convert one KOL DataFrame row into a LangChain Document.

    The page content is a single human-readable summary sentence; the
    metadata mirrors the raw column values for retrieval-time filtering.
    """
    summary = (
        f"Name of KOL: {row['kol_name']} has {row['FOLLOWERS']} followers, "
        f"received {row['LIKES']} likes, "
        f"average {row['Avg VIEWS from last 5 videos']} views from the last 5 videos, "
        f"and costs ${row['COST']} per collaboration.")
    fields = {
        'KOL Name': row['kol_name'],
        'FOLLOWERS': row['FOLLOWERS'],
        'LIKES': row['LIKES'],
        'Avg VIEWS from last 5 videos': row['Avg VIEWS from last 5 videos'],
        'COST': row['COST'],
    }
    return Document(page_content=summary, metadata=fields)


def get_clip_link(url_clip) -> str:
    """Download a single TikTok clip and run Gemini video analysis on it.

    Returns the analysis wrapped in a chatbot-style message, or an error
    string when the download or analysis fails.
    """
    try:
        print(f"current dir: {str(os.getcwd())}")
        local_path = TikTokDownloader().download_video(url_clip)
        analysis = video_understanding(video_file_name=local_path)
        return f"respone from chatbot when using get clips link: {analysis}."
    except Exception as e:
        return f"got error from chatbot when using get clips link: {e}."


def get_profile_link(url_profile) -> str:
    """Fetch up to 3 recent videos from a TikTok profile and analyze each.

    Downloads with yt-dlp into ./tiktok_videos, renames each file to a
    random 5-digit name, runs Gemini video understanding per clip, always
    deletes the local copy afterwards, and returns the combined analysis
    (or an error string).
    """
    analyses: List[str] = []
    download_dir = os.path.join(os.getcwd(), "tiktok_videos")
    try:
        os.makedirs(download_dir, exist_ok=True)
        print(f"Downloading videos to: {download_dir}")
        subprocess.run(
            [
                'yt-dlp',
                url_profile,
                '-P', download_dir,
                '--playlist-end', '3',  # edit num videos here
                '--no-overwrites',
            ],
            check=True,
        )
        print("Download completed successfully")
        # Collect freshly downloaded .mp4 files (case-insensitive extension).
        video_files = [name for name in os.listdir(download_dir)
                       if name.lower().endswith(".mp4")]
        renamed_files = []
        for old_name in video_files:
            try:
                new_name = f"{random.randint(10000, 99999)}.mp4"
                new_path = os.path.join(download_dir, new_name)
                os.rename(os.path.join(download_dir, old_name), new_path)
                renamed_files.append(new_path)
                print(f"Renamed {old_name} -> {new_name}")
            except Exception as rename_error:
                print(f"Error renaming {old_name}: {str(rename_error)}")
        for file_path in renamed_files:
            try:
                print(f"Processing: {file_path}")
                if not os.path.exists(file_path):
                    print(f"File not found: {file_path}")
                else:
                    analyses.append(video_understanding(video_file_name=file_path))
                    time.sleep(1)  # brief pause between Gemini uploads
            except Exception as processing_error:
                print(f"Error processing {file_path}: {str(processing_error)}")
            finally:
                # Always remove the local copy, even when analysis failed.
                try:
                    if os.path.exists(file_path):
                        os.remove(file_path)
                        print(f"Successfully deleted {file_path}")
                except Exception as delete_error:
                    print(f"Error deleting {file_path}: {str(delete_error)}")
        if analyses:
            final_conclusion = "\n".join(analyses)
        else:
            final_conclusion = "Cannot download mp4 file from tiktok profile link"
        return f"Response from profile link analysis: {final_conclusion}"
    except subprocess.CalledProcessError as e:
        return f"Download failed: {str(e)}"
    except Exception as e:
        return f"Processing error: {str(e)}"


def video_understanding(video_file_name) -> str:
    """Upload a local video to Gemini, wait for processing, and analyze it.

    The fixed prompt checks for sound effects, fast cuts, zoom-ins, face
    cam, talkativeness and split-screen format, then asks for creative
    feedback. Request cost is estimated from token counts using the
    module-level pricing constants. Returns the analysis text plus cost,
    or an error string on any failure.
    """
    try:
        print("Uploading file...")
        video_file = genai.upload_file(path=video_file_name)
        print(f"Completed upload: {video_file.uri}")
        # Poll until the uploaded file leaves the PROCESSING state.
        while video_file.state.name == "PROCESSING":
            print('.')
            time.sleep(3)
            video_file = genai.get_file(video_file.name)
        # Fixed: this success message used to print on every poll iteration.
        print("File upload successful!")
        if video_file.state.name == "FAILED":
            raise ValueError(video_file.state.name)
        # Use the module constant instead of a duplicated literal so the
        # model used and the pricing branch below cannot drift apart.
        model = genai.GenerativeModel(model_name=MODEL_NAME)
        print("Making LLM inference request...")
        prompt = """You are tasked with analyzing a video based on its content. Your analysis should focus on the presence of the following elements:
    *   Sound Effects: Sound effects used during dialogue, excluding game sounds or spoken dialogue.
    *   Fast Cuts: Rapid scene transitions instead of maintaining a single scene throughout the clip.
    *   Zoom-Ins: Zoom-in effects on images, as opposed to clips without any zoom-in effects.
    *   Face cam: MUST have face came
    *   Talkative: MUST have many words spoken
    *   Format video: MUST have split video, 1 for content and 1 for face cam of KOLs

    For each of these elements identified in the video description:
        1. Conclusion: Indicate whether the element is present in the clip.
        2. Details:
            a. List each element as a bullet point.
            b. Under each element, provide the timestamp(s) where it is detected. For example:
                Sound Effects: [00:15], [01:20]
    After completing the analysis, provide creative feedback on how to enhance the video's performance and virality on social media platforms. Consider aspects such as storytelling, pacing, audience engagement, and current trends.
    """
        content = [video_file, prompt]
        input_token_count = model.count_tokens(content).total_tokens
        response = model.generate_content(content, request_options={"timeout": 600})
        response.resolve()
        output_token_count = response.usage_metadata.total_token_count
        # Free the uploaded file on the Gemini side once we have the answer.
        genai.delete_file(name=video_file.name)
        print("Processing done!!!")
        # Select the pricing tier matching the configured model.
        if MODEL_NAME == "gemini-1.5-flash":
            input_cost = (input_token_count / 1000) * GEMINI_FLASH_INPUT_TOKEN_COST_PER_1K
            output_cost = (output_token_count / 1000) * GEMINI_FLASH_OUTPUT_TOKEN_COST_PER_1K
        else:
            input_cost = (input_token_count / 1000) * GEMINI_PRO_INPUT_TOKEN_COST_PER_1K
            output_cost = (output_token_count / 1000) * GEMINI_PRO_OUTPUT_TOKEN_COST_PER_1K
        cost = input_cost + output_cost
        return f"respone from chatbot using video understanding: {response.text} \n With cost: ${str(cost)}"
    except Exception as e:
        return f"got error from chatbot when using video understanding: {e}"


def binary_model(
        followers: str = "",
        avg_views: str = "",
) -> str:
    """Ask the OpenAI model for a fit/no-fit verdict on a KOL.

    The judgement is based on follower count and average views of the last
    5 videos against fixed cut ranges. Returns the verdict text, or an
    error string on failure.
    """
    try:
        query = f"follower {followers}, avg_views {avg_views}"
        decision_prompt = PromptTemplate(template="""You are tasked to make judgement whether this KOL is fits or not for our platform
    based on number of followers and avg views of last 5 videos, the cut range:
        - followers: at least 100k-500k followers (mid-tier), 
                    acceptable in 500k-1M followers (macro)
                    very good in 1M-10M followers (mega)
        - avg_views: views of last 5 videos must be in range at least 20k views upto 100k views, if higher also better
        - clips content: based on chat history (prompt), content of clips must have at least face cam, talkative and split screen
    REMEMBER to MAKE A DECISION if this KOLs is suitable for our platform or not at the end of response
    input params: {input_str}""", input_variables=["input_str"])
        llm = ChatOpenAI(model=OPENAI_MODEL)
        pipeline = decision_prompt | llm | StrOutputParser()
        verdict = pipeline.invoke(query)
        return f"respone from chatbot using binary model: {verdict}"
    except Exception as e:
        return f"got error from chatbot when using binary_model: {e}"


def call_rag_workflow(prompts: str = "") -> str:
    """Answer a user query via RAG over the KOL line-up vector store.

    Embeds the module-level ``documents`` into a FAISS index, retrieves the
    top 8 matches with MMR, and asks the OpenAI chat model to answer using
    that context. Returns the answer text, or an error string on failure.

    NOTE(review): the FAISS index is rebuilt (documents re-embedded) on
    every call — consider caching the vector store if this is hot.
    """
    try:
        prompt = PromptTemplate(template="""Answer a query given in a natural, human-like manner : {prompt}
        Information: {information}""", input_variables=["prompt","information"])
        vector_store = FAISS.from_documents(documents=documents, embedding=OpenAIEmbeddings())
        retriever = vector_store.as_retriever(search_kwargs={"k": 8}, search_type="mmr")
        model = ChatOpenAI(model=OPENAI_MODEL)
        chain = ({"information": retriever, "prompt": RunnablePassthrough()} | prompt | model | StrOutputParser())
        response = chain.invoke(prompts)
        return f"respone from chatbot using RAG: {str(response)}"
    except Exception as e:
        # Fixed duplicated word ("when using using") in the error message.
        return f"got error from chatbot when using call_rag_workflow: {e}"


def inference_cost_model(
    month: str = "",
    followers: str = "",
    tier: str = "",
    likes: str = "",
    avg_views: str = "",
) -> str:
    """Predict a KOL's collaboration cost (USD) with the AutoGluon model.

    Loads the pre-trained tabular predictor from disk, builds a single-row
    frame from the string inputs, and returns the prediction as a message.
    """
    predictor = TabularPredictor.load(
        "model/cost_prediction/ag-20241226_034850",
        require_py_version_match=False,
    )
    features = pd.DataFrame({
        "Month": [month],
        "FOLLOWERS": [float(followers)],
        "TIER": [tier],
        "LIKES": [float(likes)],
        "Avg VIEWS from last 5 videos": [float(avg_views)],
    })
    predicted = predictor.predict(features)
    return f"respone from chatbot using cost prediction model: {str(predicted[0])} USD"


def inference_view_model(
    month: str = "",
    followers: str = "",
    tier: str = "",
    likes: str = "",
    game: str = "",
    country: str = ""
) -> str:
    """Predict expected views for a KOL's video with the AutoGluon model.

    Loads the pre-trained tabular predictor from disk, builds a single-row
    frame from the string inputs, and returns the prediction as a message.
    """
    predictor = TabularPredictor.load(
        "model/view_prediction/ag-20250206_080831",
        require_py_version_match=False,
    )
    features = pd.DataFrame({
        "Month": [month],
        "FOLLOWERS": [float(followers)],
        "TIER": [tier],
        "LIKES": [float(likes)],
        "GAME": [game],
        "COUNTRY": [country],
    })
    predicted = predictor.predict(features)
    return f"respone from chatbot using views prediction model: {str(predicted[0])} views"


def inference_viral_model(
    month: str = "",
    followers: str = "",
    tier: str = "",
    likes: str = "",
    game: str = "",
    country: str = "",
    avg_views: str = ""
) -> str:
    """Predict the viral-rate percentage for a KOL's ad video.

    Loads the pre-trained AutoGluon predictor from disk, builds a
    single-row frame from the string inputs, and returns the prediction
    as a message.
    """
    predictor = TabularPredictor.load(
        "model/view_prediction/ag-20250206_072707",
        require_py_version_match=False,
    )
    features = pd.DataFrame({
        "Month": [month],
        "FOLLOWERS": [float(followers)],
        "TIER": [tier],
        "LIKES": [float(likes)],
        "GAME": [game],
        "COUNTRY": [country],
        "Avg VIEWS from last 5 videos": [float(avg_views)],
    })
    predicted = predictor.predict(features)
    return f"respone from chatbot using viral prediction model: {str(predicted[0])}% rate for viral video"


def inference_ranking_model(
    followers: str = "",
    likes: str = "",
    avg_views: str = "",
    cost: str = "",
) -> str:  # needs module-level kmeans, scaler
    """Rank a KOL from 1 (best) to 5 (worst) via the pre-fitted KMeans.

    Scales the four numeric features with the saved scaler, predicts the
    cluster, and maps the cluster id to a retention score.
    """
    # Map KMeans cluster ids to retention ranks (1 best .. 5 worst).
    cluster_to_score = {0: 1, 1: 2, 3: 3, 4: 4, 2: 5}
    # Cast string inputs to float explicitly so the scaler receives a
    # numeric array (consistent with the other inference_* helpers, which
    # all call float(); previously this relied on implicit coercion of a
    # string ndarray inside scaler.transform).
    features = np.array([[float(followers), float(likes), float(avg_views), float(cost)]])
    scaled = scaler.transform(features)
    cluster_id = kmeans.predict(scaled)[0]
    retention_score = cluster_to_score[cluster_id]
    return f"respone from chatbot using cost kol ranking model: {str(retention_score)} rank (from 1 to 5, 1 is the best, 5 is the worst)"


def test_tools(prompts: str = "") -> str:
    """Echo helper used to verify that tool dispatch works end-to-end."""
    return "this is the testing for function tool calls: {}".format(prompts)

def get_tools():
    """Build the name -> callable mapping of all locally dispatchable tools."""
    available = (
        call_rag_workflow,
        inference_cost_model,
        inference_view_model,
        inference_viral_model,
        inference_ranking_model,
        video_understanding,
        get_profile_link,
        binary_model,
        get_clip_link,
        test_tools,
    )
    return {fn.__name__: fn for fn in available}


# Module-level resources loaded once at import time.
# KOL line-up data that backs the RAG vector store (see call_rag_workflow).
data_lineup = pd.read_csv("data/data_lineup.csv")
data_lineup.rename(columns = {"KOL Name" : "kol_name"}, inplace=True)
documents = data_lineup.apply(row_to_document, axis=1).tolist()
# Pre-fitted scaler + KMeans used by inference_ranking_model.
scaler = joblib.load('model/ranking_score/scaler.save')
kmeans = joblib.load('model/ranking_score/kmeans_model.save')

# OpenAI function-calling schemas. Each entry mirrors one function defined in
# this module; the descriptions double as routing instructions for the LLM
# (when to call which tool and which arguments to ask the user for).
openai_tools = [
    # Echo tool, used only when the user prompt is exactly 'test'.
    {
        "type": "function",
        "function": {
            "name": "test_tools",
            "description": """Call this function when prompt of user is only 'test'.""",
            "parameters": {"type": "object", 
                "properties": {
                    "prompts": {"type": "string", "description": """prompt of user calling for testing. Default: None"""},
                }, 
                "required": ["prompts"]
                          },
        },
    },
    # Fit/no-fit verdict from follower count and average views.
    {
        "type": "function",
        "function": {
            "name": "binary_model",
            "description": """Call this function when users want to make decision if this KOL could fits for platform or not, only after users input number
            of follower, average view of last 5 videos and at least 1 video clips url for analysis clips content. Don't make up KOL's information and ask 
            again if missing information. MAKE A DECISION if this KOLs is suitable for our platform or not at the end of response.""",
            "parameters": {"type": "object", 
                "properties": {
                    "followers": {"type": "string", "description": """number of followers of KOL, e.g 100000. Default: None"""},
                    "avg_views": {"type": "string", "description": """average views of last 5 videos, e.g 12000. Default: None"""},
                }, 
                "required": ["followers", "avg_views"]
                          },
        },
    },
    # Single-clip analysis (clip URLs only, never profile URLs).
    {
        "type": "function",
        "function": {
            "name": "get_clip_link",
            "description": """Call this function whenever user input clips link Tiktok (e.g. https://www.tiktok.com/@abcd/video/123456789, DO NOT call this function if user input profile link) 
            to get video link of him/her and make a decision if this profile could fit 
            for us or not, based on clips content (must have face cam, talkative, split screen) , avg views last 5 videos (must in range 20k to 100k views) and 
            followers (must over 100k followers)!!!.""",
            "parameters": {"type": "object", 
                "properties": {
                    "url_clip": {"type": "string", "description": """link of profile on Tiktok, e.g: https://www.tiktok.com/@abcd/video/123456789 .Default: None"""},
                }, 
                "required": ["url_clip"]},
        },
    },
    # Profile-level analysis (profile URLs only, never clip URLs).
    {
        "type": "function",
        "function": {
            "name": "get_profile_link",
            "description": """ONLY call this function whenever user INPUT profile link Tiktok (e.g. https://www.tiktok.com/@abcd, DO NOT call this function if user input clip link) and ASK for judgement 
            to get content of clips and analysis content videos whether it has FACE CAM, TALKATIVE and SPLIT SCREENS!!!. MAKE A DECISION if this KOLs is suitable for our platform or not at the end of response.""",
            "parameters": {"type": "object", 
                "properties": {
                    "url_profile": {"type": "string", "description": """link of profile on Tiktok, e.g: https://www.tiktok.com/@abcd .Default: None"""},
                }, 
                "required": ["url_profile"]},
        },
    },
    # Knowledge lookup against the KOL line-up vector store.
    {
        "type": "function",
        "function": {
            "name": "call_rag_workflow",
            "description": """Call this function to query knowledge from vector store when you don't make sure about the answer or uncertain answer to users !!!.""",
            "parameters": {"type": "object", 
                "properties": {
                    "prompts": {"type": "string", "description": """prompts from user, you need to understand this request from user 
                    to perform query to get knowdlege from vector store. Default: None"""},
                }, 
                "required": ["prompts"]},
        },
    },
    # Cost prediction; "month" defaults to the current month at import time.
    {
        "type": "function",
        "function": {
            "name": "inference_cost_model",
            "description": """Call this function when users want to get cost prediction after giving information about KOL. Don't make up KOL's information and ask again if missing information.""",
            "parameters": {"type": "object", 
                "properties": {
                    "month": {"type": "string","enum": ["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"], 
                              "description": f"""month of current days, e.g Jan. Default: {datetime.datetime.now().strftime("%b")}"""},
                    "followers": {"type": "string", "description": """number of followers of KOL, e.g 100000. Default: None"""},
                    "tier": {"type": "string","enum": ["Macro","Mega","Micro","Mid-Tier","Nano",], 
                             "description": """Tier of KOLs based on likes and followers, e.g Macro. Default: None"""},
                    "likes": {"type": "string", "description": """number of like of KOL, e.g 200000. Default: None"""},
                    "avg_views": {"type": "string", "description": """average views of last 5 videos, e.g 12000. Default: None"""},
                }, 
                "required": ["month", "followers", "tier", "likes", "avg_views"]
                          },
        },
    },
    # Views prediction (adds game + country features).
    {
        "type": "function",
        "function": {
            "name": "inference_view_model",
            "description": """Call this function when users want to get views prediction after giving information about KOLs. Don't make up KOL's information and ask again if missing information.""",
            "parameters": {"type": "object", 
                "properties": {
                    "month": {"type": "string","enum": ["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"], 
                              "description": f"""month of current days, e.g Jan. Default: {datetime.datetime.now().strftime("%b")}"""},
                    "followers": {"type": "string", "description": """number of followers of KOL, e.g 100000. Default: None"""},
                    "tier": {"type": "string","enum": ["Macro","Mega","Micro","Mid-Tier","Nano",], 
                             "description": """Tier of KOLs based on likes and followers, e.g Macro. Default: None"""},
                    "likes": {"type": "string", "description": """number of like of KOL, e.g 200000. Default: None"""},
                    "game": {"type": "string", "description": """Name of the game that KOL played, e.g COD. Default: None"""},
                    "country": {"type": "string", "description": """Country of that KOL lives in, e.g US, Ger. Default: US"""},
                },
                           "required": ["month", "followers", "tier", "likes", "game", 'country']         
                          },
        }
    },
    # Viral-rate prediction (view features plus avg_views).
    {
        "type": "function",
        "function": {
            "name": "inference_viral_model",
            "description": """Call this function when users want to get percentage of viral for ads video after giving information about KOLs. Don't make up KOL's information and ask again if missing information.""",
            "parameters": {"type": "object", 
                "properties": {
                    "month": {"type": "string","enum": ["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"], 
                              "description": f"""month of current days, e.g Jan. Default: {datetime.datetime.now().strftime("%b")}"""},
                    "followers": {"type": "string", "description": """number of followers of KOL, e.g 100000. Default: None"""},
                    "tier": {"type": "string","enum": ["Macro","Mega","Micro","Mid-Tier","Nano",], 
                             "description": """Tier of KOLs based on likes and followers, e.g Macro. Default: None"""},
                    "likes": {"type": "string", "description": """number of like of KOL, e.g 200000. Default: None"""},
                    "game": {"type": "string", "description": """Name of the game that KOL played, e.g COD. Default: None"""},
                    "country": {"type": "string", "description": """Country of that KOL lives in, e.g US, Ger. Default: US"""},
                    "avg_views": {"type": "string", "description": """average views of last 5 videos, e.g 12000. Default: None"""},
                },
                           "required": ["month", "followers", "tier", "likes", "game", 'country', "avg_views"]         
                          },
        }
    },
    # KMeans-based ranking score (1 best .. 5 worst).
    {
        "type": "function",
        "function": {
            "name": "inference_ranking_model",
            "description": """Call this function when users want to get KOL ranking score (from 1 to 5, 1 is the best, 5 is the worst) after giving information about KOL. Don't make up KOL's information and ask again if missing information.""",
            "parameters": {"type": "object", 
                "properties": {
                    "followers": {"type": "string", "description": """number of followers of KOL, e.g 100000. Default: None"""},
                    "likes": {"type": "string", "description": """number of like of KOL, e.g 200000. Default: None"""},
                    "avg_views": {"type": "string", "description": """average views of last 5 videos, e.g 12000. Default: None"""},
                    "cost": {"type": "string", "description": """cost for KOL to promote our product, e.g 1000, 2000. Default: None"""},
                }, 
                "required": ["followers", "likes", "avg_views", "cost"]},
        },
    },
    # Direct Gemini analysis of a local video file path.
    {
        "type": "function",
        "function": {
            "name": "video_understanding",
            "description": """Call this function when users give file path to AI for video understanding, 
            keep conclusion of bullet point and MUST return cost of processing.""",
            "parameters": {"type": "object", 
                "properties": {
                    "video_file_name": {"type": "string", "description": """path file of video for processing video understanding. Default: None"""},
                }, 
                "required": ["video_file_name"]},
        },
    }
]
local_tools = get_tools()