junchenfu committed
Commit 4dcf6b1 · verified · Parent: 48ec6cb

Upload pipline.py with huggingface_hub

Files changed (1): pipline.py (+563, -0)
pipline.py ADDED
import argparse
import os
import time
import pandas as pd
import numpy as np
from sentence_transformers import SentenceTransformer
from tqdm import tqdm
import faiss
import gc
import openai
import random
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline, BitsAndBytesConfig

os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# ==============================
# Part 1: Generate RAG Prompt and Save to File
# ==============================
def set_random_seed(seed):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)


def generate_rag_prompt(user_prompt,
                        output_dir,
                        total_videos=1974,
                        selected_videos_num=50,
                        num_tags=1,
                        ratio=0.1,  # proportion of negative (unpopular) samples
                        seed=42):
    # 1. Set the random seed for reproducibility
    set_random_seed(seed)

    # 2. Load the various data files
    views_data = pd.read_csv('Microlens/MicroLens-100k_likes_and_views.txt',
                             sep='\t', header=None, names=['video_id', 'likes', 'views'])
    title_data = pd.read_csv('Microlens/MicroLens-100k_title_en.csv',
                             sep=',', header=None, names=['video_id', 'title_en'])
    cover_data = pd.read_csv('Microlens/llava-v1.5_caption.txt',
                             sep=',', header=None, names=['video_id', 'cover_desc'])
    desc_data = pd.read_csv('Microlens/Microlens100K_captions_en.csv',
                            sep='\t', header=None, names=['video_id', 'caption_en'])
    tags_data = pd.read_csv('Microlens/tags_to_summary.csv',
                            sep=',', header=None, names=['video_id', 'partition'])

    # 3. Load comment data and count comments per video_id
    comments_data = pd.read_csv('Microlens/MicroLens-100k_comment_en.txt',
                                sep='\t', header=None, names=['user_id', 'video_id', 'comment_text'])
    comments_data = comments_data[['video_id', 'comment_text']]

    # Group by video_id and count comments
    comment_count_df = (
        comments_data
        .groupby('video_id')['comment_text']
        .count()
        .reset_index(name='comment_count')
    )

    # Merge all data sources on video_id
    merged = (
        views_data
        .merge(title_data, on='video_id', how='left')
        .merge(cover_data, on='video_id', how='left')
        .merge(desc_data, on='video_id', how='left')
        .merge(tags_data, on='video_id', how='left')
        .merge(comment_count_df, on='video_id', how='left')
    )

    # Load test set IDs
    test_id_data = pd.read_csv('/MicroLens/test_id.csv',
                               sep=',', header=None, names=['video_id'])

    # Restrict to the test set via an inner join on 'video_id'
    merged = merged.merge(test_id_data, on='video_id', how='inner')

    # Drop rows with missing values in key fields
    merged.dropna(subset=['title_en', 'cover_desc', 'caption_en', 'partition', 'comment_count'], inplace=True)

    # 4. Embed the 'partition' tags and the user prompt with SentenceTransformer
    model = SentenceTransformer('sentence-transformers/all-MiniLM-L12-v2')
    unique_partitions = merged['partition'].unique().tolist()
    partition_embeddings = model.encode(unique_partitions)
    prompt_embedding = model.encode([user_prompt])[0]

    # Compute cosine similarities between the prompt and each partition
    similarities = [
        np.dot(prompt_embedding, pe)
        / (np.linalg.norm(prompt_embedding) * np.linalg.norm(pe))
        for pe in partition_embeddings
    ]
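    # Note: the list comprehension above is plain cosine similarity. Assuming
    # the installed sentence-transformers exposes util.cos_sim (recent
    # versions do), an equivalent vectorized form would be:
    #   from sentence_transformers import util
    #   similarities = util.cos_sim(prompt_embedding, partition_embeddings)[0].tolist()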
    top_k_indices = np.argsort(similarities)[::-1][:num_tags]
    top_k_partitions = [unique_partitions[i] for i in top_k_indices]

    # Filter data to the top-matching partitions
    filtered_data = merged[merged['partition'].isin(top_k_partitions)]

    # Sort by comment count (most-commented first)
    filtered_data = filtered_data.sort_values('comment_count', ascending=False)

    # Split the dataset by ratio (proportion of negative samples)
    n_negative = int(len(filtered_data) * ratio)
    n_positive = len(filtered_data) - n_negative

    # Popular videos: the most-commented rows
    positive_videos = filtered_data.head(n_positive)

    # Unpopular videos: the least-commented rows of the remainder
    remaining_data = filtered_data.iloc[n_positive:]
    negative_videos = remaining_data.tail(n_negative)
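    # Worked example of the split: with 100 filtered rows and ratio=0.1,
    # n_negative = 10 and n_positive = 90, so the 90 most-commented videos
    # become positives and the 10 least-commented become negatives.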
    # Remove duplicates to prevent overlap (reassign rather than mutate the
    # slices in place, which would trigger pandas' SettingWithCopyWarning)
    positive_videos = positive_videos.drop_duplicates(subset=['video_id'])
    negative_videos = negative_videos.drop_duplicates(subset=['video_id'])

    # Merge positive and negative samples for retrieval
    combined_videos = pd.concat([positive_videos, negative_videos])
    combined_videos.drop_duplicates(subset=['video_id'], keep='first', inplace=True)

    # Create the combined text for embedding
    combined_texts = (
        combined_videos['title_en'] + " "
        + combined_videos['cover_desc'] + " "
        + combined_videos['caption_en']
    ).tolist()
    combined_embeddings = model.encode(combined_texts)

    # Similarity search with a FAISS exact (flat) L2 index
    index = faiss.IndexFlatL2(combined_embeddings.shape[1])
    index.add(np.array(combined_embeddings).astype('float32'))
    query_embedding = model.encode([user_prompt]).astype('float32')
    _, I = index.search(query_embedding, len(combined_videos))
    retrieved_videos = combined_videos.iloc[I[0]]
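    # Note: IndexFlatL2 ranks by exact L2 distance, while the partition step
    # above ranked by cosine similarity. If a single metric is preferred, a
    # minimal variant (assuming float32 inputs) is to L2-normalize and use an
    # inner-product index, which makes the two rankings agree:
    #   embs = np.ascontiguousarray(combined_embeddings, dtype='float32')
    #   faiss.normalize_L2(embs)
    #   index = faiss.IndexFlatIP(embs.shape[1])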
    # Calculate final sample sizes
    final_n_negative = int(selected_videos_num * ratio)
    final_n_positive = selected_videos_num - final_n_negative

    # Identify positive and negative samples among the retrieval results
    positive_ids = set(positive_videos['video_id'].tolist())
    negative_ids = set(negative_videos['video_id'].tolist())

    retrieved_positive = retrieved_videos[retrieved_videos['video_id'].isin(positive_ids)]
    retrieved_negative = retrieved_videos[retrieved_videos['video_id'].isin(negative_ids)]

    # Select the final samples
    final_selected_positive = retrieved_positive.head(final_n_positive)
    final_selected_negative = retrieved_negative.head(final_n_negative)

    # Maintain the retrieval order
    final_selected_videos = pd.concat([final_selected_positive, final_selected_negative])
    final_selected_videos = final_selected_videos.loc[
        retrieved_videos.index.intersection(final_selected_videos.index)
    ]

    # Build the output text
    rag_positive_context = "\n".join([
        f"Reference Video {i+1} (Positive Sample - Popular):\n"
        f"Title: {row['title_en']}\n"
        f"Cover Desc: {row['cover_desc']}\n"
        f"Desc: {row['caption_en']}\n"
        f"Comment Count: {row['comment_count']}\n"
        for i, (idx, row) in enumerate(final_selected_positive.iterrows())
    ])

    rag_negative_context = "\n".join([
        f"Reference Video {i+1} (Negative Sample - Unpopular):\n"
        f"Title: {row['title_en']}\n"
        f"Cover Desc: {row['cover_desc']}\n"
        f"Desc: {row['caption_en']}\n"
        f"Comment Count: {row['comment_count']}\n"
        for i, (idx, row) in enumerate(final_selected_negative.iterrows())
    ])

    rag_context = rag_positive_context + "\n" + rag_negative_context

    cot_prompt = f"""
You are a talented video creator with a wealth of ideas. Think from the user's perspective, then generate the most popular video title, an AI-generated cover prompt, and a 3-second AI-generated video prompt.

Below is the user query:

{user_prompt}

Below is the reasoning chain (Chain of Thought) that you should follow step by step.

Reasoning Chain:
1. Analyze both the popular and the unpopular videos as references, using the provided context:
{rag_context}
2. Based on the analyzed videos, brainstorm unique and creative ideas for a new video topic. Ensure the idea is original and does not replicate existing content; direct copying is strictly forbidden.
3. Write or conceptualize a logical and original script or content based on the topic from Step 2.
4. Double-check:
    1. Whether the theme and content are accurately conveyed.
    2. Whether the theme and content are strongly related and fully satisfy the user's needs.
5. Start generating based on the confirmed topic and content.
6. Re-evaluate:
    1. Whether the generated prompt is logically correct.
    2. Whether the final suggestions match popular trends.
7. If the output does not meet expectations, refine and finalize it.

Explicitly generate a chain of thought during the reasoning process for each candidate. The chain of thought should detail the steps, considerations, and rationale behind the candidate generation, ensuring transparency and clarity in the decision-making process.

Final Answer Requirements:
- A single line for the final generated Title (MAX_length = 50).
- A single paragraph for the Cover Prompt.
- A single paragraph for the Video Prompt (3-second).


Now, based on the above reasoning, generate the response in JSON format. Here is an example:
{{
    "title": "Unveiling the Legacy of Ancient Rome: Rise, Glory, and Downfall.",
    "cover_prompt": "Generate an image of a Roman Emperor standing proudly in front of the Colosseum, with a subtle sunset backdrop, highlighting the contrast between the ancient structure.",
    "video_prompt": "Open with a 3-second aerial shot of the Roman Forum, showcasing the sprawling ancient ruins against a clear blue sky, before zooming in on a singular, imposing structure like the Colosseum."
}}
Please provide your answer following this exact JSON template for the response.
"""

    # Save the prompt to a file (note: the raw user prompt is used as the
    # file name, so it should not contain path separators)
    output_file = os.path.join(output_dir, f"{user_prompt}.txt")
    with open(output_file, "w", encoding="utf-8") as f:
        f.write(cot_prompt)

    print(f"RAG prompt saved to {output_file}")


# ==============================
# Part 2: Load RAG Prompts and Perform Inference
# ==============================
def inference_from_prompts(input_dir, output_dir, seed=42):
    set_random_seed(seed)
    llama_model_name = "meta-llama/Llama-3.3-70B-Instruct"
    tokenizer = AutoTokenizer.from_pretrained(llama_model_name)
    quantization_config = BitsAndBytesConfig(load_in_4bit=True)
    model_llama = AutoModelForCausalLM.from_pretrained(
        llama_model_name, device_map="auto", torch_dtype=torch.bfloat16, quantization_config=quantization_config
    )
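    # Note: even 4-bit quantized, a 70B checkpoint holds roughly 35 GB of
    # weights, so device_map="auto" may shard the model across GPUs or spill
    # layers to CPU depending on available memory.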

    llama_pipeline = pipeline(
        "text-generation",
        model=model_llama,
        tokenizer=tokenizer,
        max_new_tokens=5000,
        temperature=0.7,
        top_p=0.9,
        repetition_penalty=1.1,
        do_sample=True
    )

    for file_name in tqdm(os.listdir(input_dir), desc="inferencing"):
        output_path = os.path.join(output_dir, file_name)
        if os.path.exists(output_path):
            continue

        if file_name.endswith(".txt"):
            # Read the input file content
            input_path = os.path.join(input_dir, file_name)
            with open(input_path, "r", encoding="utf-8") as f:
                cot_prompt = f.read()

            # Define the system and user messages
            messages = [
                {"role": "system", "content": "You are a talented video creator with a wealth of ideas. Think from the user's perspective and generate the most popular video title, an AI-generated cover prompt, and a 3-second AI-generated video prompt."},
                {"role": "user", "content": cot_prompt},
            ]

            # Run model inference
            response = llama_pipeline(messages, num_return_sequences=1)
            full_output = response[0]['generated_text']
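            # With chat-style input, `generated_text` holds the full message
            # list (system, user, assistant), so index 2 below is the
            # assistant turn; indexing with [-1] would be more defensive.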

            # Save the output to the target path
            with open(output_path, "w", encoding="utf-8") as f:
                f.write(full_output[2]['content'])

            print(f"Inference result saved to {output_path}")


def inference_from_prompts_qwen(input_dir, output_dir, seed=42):
    # Set random seed for reproducibility
    set_random_seed(seed)

    # Qwen model name
    qwen_model_name = "Qwen/Qwen2.5-72B-Instruct"

    # Load tokenizer with trust_remote_code=True
    tokenizer = AutoTokenizer.from_pretrained(qwen_model_name, trust_remote_code=True)

    # Configure 4-bit quantization
    bnb_config = BitsAndBytesConfig(
        load_in_4bit=True,
    )

    # Load Qwen model with auto device mapping and quantization config
    model_qwen = AutoModelForCausalLM.from_pretrained(
        qwen_model_name,
        device_map="auto",
        quantization_config=bnb_config,
        trust_remote_code=True
    )

    # Create a text-generation pipeline with the same parameters as above
    qwen_pipeline = pipeline(
        "text-generation",
        model=model_qwen,
        tokenizer=tokenizer,
        max_new_tokens=5000,
        temperature=0.7,
        top_p=0.9,
        repetition_penalty=1.1,
        do_sample=True
    )

    # Process all .txt files in the input directory
    for file_name in tqdm(os.listdir(input_dir), desc="inferencing"):
        if file_name.endswith(".txt"):
            # Read input file content
            input_path = os.path.join(input_dir, file_name)
            with open(input_path, "r", encoding="utf-8") as f:
                cot_prompt = f.read()

            # Define system and user messages
            messages = [
                {
                    "role": "system",
                    "content": (
                        "You are a talented video creator with a wealth of ideas. "
                        "Think from the user's perspective and generate "
                        "the most popular video title, an AI-generated cover prompt, "
                        "and a 3-second AI-generated video prompt."
                    )
                },
                {"role": "user", "content": cot_prompt},
            ]

            # Run model inference
            response = qwen_pipeline(messages, num_return_sequences=1)
            full_output = response[0]['generated_text']

            # Save the output to the target path
            output_path = os.path.join(output_dir, file_name)
            with open(output_path, "w", encoding="utf-8") as f:
                f.write(full_output[2]['content'])

            print(f"Inference result saved to {output_path}")


def inference_from_prompts_mistral(input_dir, output_dir, seed=42):
    # Set random seed for reproducibility
    set_random_seed(seed)

    # Mistral model name
    mistral_model_name = "mistralai/Mistral-Large-Instruct-2411"

    # Load tokenizer with trust_remote_code=True
    tokenizer = AutoTokenizer.from_pretrained(mistral_model_name, trust_remote_code=True)

    # Configure 4-bit quantization
    bnb_config = BitsAndBytesConfig(
        load_in_4bit=True,
    )

    # Load Mistral model with auto device mapping and quantization config
    model_mistral = AutoModelForCausalLM.from_pretrained(
        mistral_model_name,
        device_map="auto",
        quantization_config=bnb_config,
        trust_remote_code=True
    )

    # Create a text-generation pipeline with the same parameters as above
    mistral_pipeline = pipeline(
        "text-generation",
        model=model_mistral,
        tokenizer=tokenizer,
        max_new_tokens=5000,
        temperature=0.7,
        top_p=0.9,
        repetition_penalty=1.1,
        do_sample=True
    )

    # Process all .txt files in the input directory, skipping finished ones
    for file_name in tqdm(os.listdir(input_dir), desc="inferencing"):
        output_path = os.path.join(output_dir, file_name)
        if os.path.exists(output_path):
            continue

        if file_name.endswith(".txt"):
            # Read input file content
            input_path = os.path.join(input_dir, file_name)
            with open(input_path, "r", encoding="utf-8") as f:
                cot_prompt = f.read()

            # Define system and user messages
            messages = [
                {
                    "role": "system",
                    "content": (
                        "You are a talented video creator with a wealth of ideas. "
                        "Think from the user's perspective and generate "
                        "the most popular video title, an AI-generated cover prompt, "
                        "and a 3-second AI-generated video prompt."
                    )
                },
                {"role": "user", "content": cot_prompt},
            ]

            # Run model inference
            response = mistral_pipeline(messages, num_return_sequences=1)
            full_output = response[0]['generated_text']

            # Save the output to the target path
            with open(output_path, "w", encoding="utf-8") as f:
                f.write(full_output[2]['content'])

            print(f"Inference result saved to {output_path}")


def inference_from_prompts_api(input_dir, output_dir, api_key, model="deepseek-reasoner", base_url="https://api.deepseek.com"):
    # Uses the OpenAI-compatible client; adjust the import if the installed
    # SDK differs
    from openai import OpenAI
    print(model)

    # Initialize the DeepSeek client
    client = OpenAI(api_key=api_key, base_url=base_url)

    # Iterate through files in the input directory, skipping finished ones
    for file_name in tqdm(os.listdir(input_dir), desc="Inferencing with DeepSeek"):
        output_path = os.path.join(output_dir, file_name)
        if file_name.endswith(".txt") and not os.path.exists(output_path):
            # Read input file content
            input_path = os.path.join(input_dir, file_name)
            with open(input_path, "r", encoding="utf-8") as f:
                cot_prompt = f.read()

            # Define system and user messages
            messages = [
                {
                    "role": "system",
                    "content": (
                        "You are a talented video creator with a wealth of ideas. "
                        "Think from the user's perspective and generate "
                        "the most popular video title, an AI-generated cover prompt, "
                        "and a 3-second AI-generated video prompt."
                    )
                },
                {"role": "user", "content": cot_prompt},
            ]

            # Retry until the request succeeds
            while True:
                try:
                    # Call the DeepSeek API for inference
                    response = client.chat.completions.create(
                        model=model,
                        messages=messages
                    )
                    # Break out of the loop on success
                    break
                except Exception as e:
                    # Print the error and wait before retrying
                    print(f"Error occurred: {e}")
                    print("Waiting 120 seconds before retrying...")
                    time.sleep(120)
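                    # Note: this retries indefinitely with a fixed delay; a
                    # bounded exponential backoff (e.g. sleeping
                    # min(60 * 2 ** attempt, 600) for a capped number of
                    # attempts) would fail faster on permanent errors.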

            full_output = response.choices[0].message.content

            with open(output_path, "w", encoding="utf-8") as f:
                f.write(full_output)

            print(f"Inference result saved to {output_path}")


def inference_from_prompts_gpt(input_dir, output_dir, api_key, model="gpt-4o"):
    """
    Use the OpenAI GPT API to process input prompts and generate outputs.

    Parameters:
        input_dir (str): Directory containing input text files with prompts.
        output_dir (str): Directory to save the generated outputs.
        api_key (str): OpenAI API key for authentication.
        model (str): The GPT model to use (default: "gpt-4o").
    """
    # Set the OpenAI API key
    openai.api_key = api_key

    # Ensure the output directory exists
    os.makedirs(output_dir, exist_ok=True)

    # Iterate through files in the input directory
    for file_name in tqdm(os.listdir(input_dir), desc="Inferencing with GPT"):

        if file_name.endswith(".txt"):
            # Read input file content
            input_path = os.path.join(input_dir, file_name)
            with open(input_path, "r", encoding="utf-8") as f:
                cot_prompt = f.read()

            # Define the system and user prompts
            messages = [
                {"role": "system", "content": "You are a talented video creator with a wealth of ideas. Think from the user's perspective and generate the most popular video title, an AI-generated cover prompt, and a 3-second AI-generated video prompt."},
                {"role": "user", "content": cot_prompt},
            ]

            # Call the GPT API for inference (legacy pre-1.0 openai interface;
            # with openai>=1.0 this would be OpenAI().chat.completions.create)
            try:
                response = openai.ChatCompletion.create(
                    model=model,
                    messages=messages,
                    stream=False
                )
                # Extract the generated content
                full_output = response['choices'][0]['message']['content']

                # Save the output to the specified path
                output_path = os.path.join(output_dir, file_name)
                with open(output_path, "w", encoding="utf-8") as f:
                    f.write(full_output)

                print(f"Inference result saved to {output_path}")

            except Exception as e:
                print(f"Error processing {file_name}: {e}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--USER_PROMPT", type=str, help="The user prompt to process.")
    parser.add_argument("--MODE", type=str,
                        choices=["generate", "infer", "infer_api", "infer_gpt", "infer_qwen", "infer_mistral"],
                        required=True, help="Mode: generate a RAG prompt, or run one of the inference backends.")
    parser.add_argument("--INPUT_DIR", type=str, help="Input directory for inference.")
    parser.add_argument("--OUTPUT_DIR", type=str, required=True, help="Output directory.")
    parser.add_argument("--MODEL", type=str, required=True, help="Model name (used by the infer_api mode).")
    parser.add_argument("--VID_NUM", type=int, default=10, help="Number of selected videos.")
    parser.add_argument("--TAGS_NUM", type=int, default=1, help="Number of selected tags.")
    args = parser.parse_args()

    if args.MODE == "generate":
        os.makedirs(args.OUTPUT_DIR, exist_ok=True)
        generate_rag_prompt(args.USER_PROMPT, args.OUTPUT_DIR,
                            selected_videos_num=args.VID_NUM, num_tags=args.TAGS_NUM)
    elif args.MODE == "infer":
        os.makedirs(args.OUTPUT_DIR, exist_ok=True)
        inference_from_prompts(args.INPUT_DIR, args.OUTPUT_DIR)
    elif args.MODE == "infer_qwen":
        os.makedirs(args.OUTPUT_DIR, exist_ok=True)
        inference_from_prompts_qwen(args.INPUT_DIR, args.OUTPUT_DIR)
    elif args.MODE == "infer_mistral":
        os.makedirs(args.OUTPUT_DIR, exist_ok=True)
        inference_from_prompts_mistral(args.INPUT_DIR, args.OUTPUT_DIR)
    elif args.MODE == "infer_api":
        os.makedirs(args.OUTPUT_DIR, exist_ok=True)
        inference_from_prompts_api(args.INPUT_DIR, args.OUTPUT_DIR, "API_KEY", model=args.MODEL)
    elif args.MODE == "infer_gpt":
        api_key = "API_KEY"
        os.makedirs(args.OUTPUT_DIR, exist_ok=True)
        inference_from_prompts_gpt(args.INPUT_DIR, args.OUTPUT_DIR, api_key, model="gpt-4o")

    # Release GPU memory on exit (torch is already imported at module level)
    gc.collect()
    torch.cuda.empty_cache()
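
# Example invocations (illustrative; directories and prompts are placeholders,
# and "API_KEY" above must be replaced with a real key for the API modes):
#   python pipline.py --MODE generate --USER_PROMPT "street food tour" \
#       --MODEL none --OUTPUT_DIR rag_prompts --VID_NUM 10 --TAGS_NUM 1
#   python pipline.py --MODE infer --INPUT_DIR rag_prompts \
#       --MODEL llama --OUTPUT_DIR results_llama
#   python pipline.py --MODE infer_api --INPUT_DIR rag_prompts \
#       --MODEL deepseek-reasoner --OUTPUT_DIR results_deepseek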