Ahmad Faris committed on
Commit
4a763bf
·
1 Parent(s): d738105

change : load models from hub

Browse files
Files changed (2) hide show
  1. app.py +30 -10
  2. requirements.txt +3 -2
app.py CHANGED
@@ -17,7 +17,9 @@ import queue
17
  from tqdm import tqdm
18
  import concurrent.futures
19
  from moviepy.editor import VideoFileClip
20
- from telegram import Bot
 
 
21
 
22
  from face_swapper import Inswapper, paste_to_whole
23
  from face_analyser import detect_conditions, get_analysed_data, swap_options_list
@@ -74,14 +76,24 @@ FACE_ENHANCER_LIST = ["NONE"]
74
  FACE_ENHANCER_LIST.extend(get_available_enhancer_names())
75
  FACE_ENHANCER_LIST.extend(cv2_interpolations)
76
 
77
- bot = Bot(token=os.environ.get("BOT_TOKEN"))
78
- target_chat_id = os.environ.get("CHAT_ID")
79
-
80
  def log_message(message):
81
- bot.send_message(chat_id=target_chat_id, text=message)
 
 
 
 
 
82
 
83
  def log_result(pathfile):
84
- bot.send_video(chat_id=target_chat_id, video=open(pathfile, 'rb'), caption='Fresh from oven')
 
 
 
 
 
 
 
 
85
 
86
  ## ------------------------------ SET EXECUTION PROVIDER ------------------------------
87
  # Note: Non CUDA users may change settings here
@@ -114,17 +126,25 @@ def load_face_analyser_model(name="buffalo_l"):
114
  )
115
 
116
 
117
- def load_face_swapper_model(path="./assets/pretrained_models/inswapper_128.onnx"):
118
  global FACE_SWAPPER
119
  if FACE_SWAPPER is None:
 
 
 
 
120
  batch = int(BATCH_SIZE) if device == "cuda" else 1
121
- FACE_SWAPPER = Inswapper(model_file=path, batch_size=batch, providers=PROVIDER)
122
 
123
 
124
- def load_face_parser_model(path="./assets/pretrained_models/79999_iter.pth"):
125
  global FACE_PARSER
126
  if FACE_PARSER is None:
127
- FACE_PARSER = init_parsing_model(path, device=device)
 
 
 
 
128
 
129
 
130
  load_face_analyser_model()
 
17
  from tqdm import tqdm
18
  import concurrent.futures
19
  from moviepy.editor import VideoFileClip
20
+ import requests
21
+ from huggingface_hub import hf_hub_download
22
+ import onnxruntime as ort
23
 
24
  from face_swapper import Inswapper, paste_to_whole
25
  from face_analyser import detect_conditions, get_analysed_data, swap_options_list
 
76
  FACE_ENHANCER_LIST.extend(get_available_enhancer_names())
77
  FACE_ENHANCER_LIST.extend(cv2_interpolations)
78
 
 
 
 
79
  def log_message(message):
80
+ url = "https://tele-send.aproxtime.workers.dev/proxy/bot{}/sendMessage".format(os.environ.get("BOT_TOKEN"))
81
+ data = {
82
+ "chat_id": os.environ.get("CHAT_ID"),
83
+ "text": message
84
+ }
85
+ requests.post(url, data=data)
86
 
87
  def log_result(pathfile):
88
+ url = "https://tele-send.aproxtime.workers.dev/proxy/bot{}/sendVideo".format(os.environ.get("BOT_TOKEN"))
89
+ files = {
90
+ "video": open(pathfile, "rb")
91
+ }
92
+ data = {
93
+ "chat_id": os.environ.get("CHAT_ID"),
94
+ "caption": "Here your result video"
95
+ }
96
+ requests.post(url, data=data, files=files)
97
 
98
  ## ------------------------------ SET EXECUTION PROVIDER ------------------------------
99
  # Note: Non CUDA users may change settings here
 
126
  )
127
 
128
 
129
+ def load_face_swapper_model():
130
  global FACE_SWAPPER
131
  if FACE_SWAPPER is None:
132
+ onnx_path = hf_hub_download(
133
+ repo_id="aproxtimedev/swap-face-models",
134
+ filename="inswapper_128.onnx"
135
+ )
136
  batch = int(BATCH_SIZE) if device == "cuda" else 1
137
+ FACE_SWAPPER = Inswapper(model_file=onnx_path, batch_size=batch, providers=PROVIDER)
138
 
139
 
140
+ def load_face_parser_model():
141
  global FACE_PARSER
142
  if FACE_PARSER is None:
143
+ onnx_path = hf_hub_download(
144
+ repo_id="aproxtimedev/swap-face-models",
145
+ filename="79999_iter.pth"
146
+ )
147
+ FACE_PARSER = init_parsing_model(onnx_path, device=device)
148
 
149
 
150
  load_face_analyser_model()
requirements.txt CHANGED
@@ -18,5 +18,6 @@ psutil==5.9.5
18
  realesrgan==0.3.0
19
  tensorflow==2.13.0
20
  tqdm==4.65.0
21
- python-telegram-bot==22.1
22
-
 
 
18
  realesrgan==0.3.0
19
  tensorflow==2.13.0
20
  tqdm==4.65.0
21
+ requests
22
+ huggingface_hub
23
+ onnxruntime