Update app.py
Browse files
app.py
CHANGED
|
@@ -302,18 +302,6 @@ def save_uploaded_file(file, filename,TEMP_DIR):
|
|
| 302 |
|
| 303 |
client = OpenAI(api_key="sk-proj-REDACTED")  # SECURITY: a live-looking secret API key was committed here verbatim; redacted in this document. The key must be revoked and loaded from an environment variable (e.g. os.environ["OPENAI_API_KEY"]), never hard-coded.
|
| 304 |
|
| 305 |
-
def translate_text(text_prompt, target_language):
|
| 306 |
-
response = client.chat.completions.create(
|
| 307 |
-
model="gpt-4o-mini",
|
| 308 |
-
messages=[{"role": "system", "content": "You are a helpful language translator assistant."},
|
| 309 |
-
{"role": "user", "content": f"Translate completely without hallucination, end to end and the output should just be the translation of the text prompt and nothing else, and give the following text to {target_language} language and the text is: {text_prompt}"},
|
| 310 |
-
],
|
| 311 |
-
max_tokens = len(text_prompt) + 200 # Use the length of the input text
|
| 312 |
-
# temperature=0.3,
|
| 313 |
-
# stop=["Translate:", "Text:"]
|
| 314 |
-
)
|
| 315 |
-
return response
|
| 316 |
-
|
| 317 |
def openai_chat_avatar(text_prompt):
|
| 318 |
response = client.chat.completions.create(
|
| 319 |
model="gpt-4o-mini",
|
|
@@ -392,7 +380,7 @@ def generate_video():
|
|
| 392 |
|
| 393 |
voice_cloning = request.form.get('voice_cloning', 'no')
|
| 394 |
image_hardcoded = request.form.get('image_hardcoded', 'yes')
|
| 395 |
-
chat_model_used = request.form.get('chat_model_used', '
|
| 396 |
target_language = request.form.get('target_language', 'original_text')
|
| 397 |
print('target_language',target_language)
|
| 398 |
pose_style = int(request.form.get('pose_style', 1))
|
|
@@ -405,11 +393,7 @@ def generate_video():
|
|
| 405 |
preprocess = request.form.get('preprocess', 'crop')
|
| 406 |
print('preprocess selected: ',preprocess)
|
| 407 |
ref_pose_video = request.files.get('ref_pose', None)
|
| 408 |
-
|
| 409 |
-
# if target_language != 'original_text':
|
| 410 |
-
# response = translate_text(text_prompt, target_language)
|
| 411 |
-
# # response = await translate_text_async(text_prompt, target_language)
|
| 412 |
-
# text_prompt = response.choices[0].message.content.strip()
|
| 413 |
|
| 414 |
if chat_model_used == 'ryzedb':
|
| 415 |
response = ryzedb_chat_avatar(text_prompt)
|
|
@@ -431,16 +415,11 @@ def generate_video():
|
|
| 431 |
continue
|
| 432 |
|
| 433 |
else:
|
| 434 |
-
|
| 435 |
-
# text_prompt = response.choices[0].message.content.strip()
|
| 436 |
-
app.config['text_prompt'] = text_prompt
|
| 437 |
-
print('Final output text prompt using openai: ',text_prompt)
|
| 438 |
|
| 439 |
source_image_path = save_uploaded_file(source_image, 'source_image.png',TEMP_DIR)
|
| 440 |
print(source_image_path)
|
| 441 |
-
|
| 442 |
-
# driven_audio_path = await voice_cloning_async(voice_cloning, voice_gender, text_prompt, user_voice)
|
| 443 |
-
|
| 444 |
if voice_cloning == 'no':
|
| 445 |
if voice_gender == 'male':
|
| 446 |
voice = 'echo'
|
|
|
|
| 302 |
|
| 303 |
client = OpenAI(api_key="sk-proj-REDACTED")  # SECURITY: leaked secret key redacted (duplicate occurrence); revoke the key and read it from an environment variable instead.
|
| 304 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 305 |
def openai_chat_avatar(text_prompt):
|
| 306 |
response = client.chat.completions.create(
|
| 307 |
model="gpt-4o-mini",
|
|
|
|
| 380 |
|
| 381 |
voice_cloning = request.form.get('voice_cloning', 'no')
|
| 382 |
image_hardcoded = request.form.get('image_hardcoded', 'yes')
|
| 383 |
+
chat_model_used = request.form.get('chat_model_used', 'ryzedb')
|
| 384 |
target_language = request.form.get('target_language', 'original_text')
|
| 385 |
print('target_language',target_language)
|
| 386 |
pose_style = int(request.form.get('pose_style', 1))
|
|
|
|
| 393 |
preprocess = request.form.get('preprocess', 'crop')
|
| 394 |
print('preprocess selected: ',preprocess)
|
| 395 |
ref_pose_video = request.files.get('ref_pose', None)
|
| 396 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
| 397 |
|
| 398 |
if chat_model_used == 'ryzedb':
|
| 399 |
response = ryzedb_chat_avatar(text_prompt)
|
|
|
|
| 415 |
continue
|
| 416 |
|
| 417 |
else:
|
| 418 |
+
print("No Ryze database found")
|
|
|
|
|
|
|
|
|
|
| 419 |
|
| 420 |
source_image_path = save_uploaded_file(source_image, 'source_image.png',TEMP_DIR)
|
| 421 |
print(source_image_path)
|
| 422 |
+
|
|
|
|
|
|
|
| 423 |
if voice_cloning == 'no':
|
| 424 |
if voice_gender == 'male':
|
| 425 |
voice = 'echo'
|