cngsm commited on
Commit
ff14e23
·
verified ·
1 Parent(s): a970da4

Upload 2 files

Browse files
Files changed (2) hide show
  1. main.py.py +187 -0
  2. requirements.txt +7 -0
main.py.py ADDED
@@ -0,0 +1,187 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ from datetime import datetime
4
+ from utils.video_processor import VideoProcessor
5
+ from utils.mediapipe_utils import MediaPipeProcessor
6
+ from utils.huggingface_utils import HuggingFaceUploader
7
+
8
class LibrasProcessingPipeline:
    """End-to-end pipeline for Libras (Brazilian Sign Language) videos.

    Steps: normalize the raw video, extract MediaPipe keypoints, dump the
    keypoints to JSON, stage video + JSON + metadata in a per-video folder
    for Hugging Face, and optionally upload that folder.

    Args:
        config: dict with at least the directory keys 'input_dir',
            'normalized_dir', 'keypoints_dir' and 'huggingface_dir'; see the
            module-level ``config`` for the full expected shape.
    """

    # Directory keys that must exist on disk before processing starts.
    _DIR_KEYS = ('input_dir', 'normalized_dir', 'keypoints_dir', 'huggingface_dir')

    def __init__(self, config):
        self.config = config
        self.setup_directories()

    def setup_directories(self):
        """Create every required directory (no-op if it already exists)."""
        for key in self._DIR_KEYS:
            os.makedirs(self.config[key], exist_ok=True)

    def run_pipeline(self, video_filename):
        """Run the full pipeline for a single video.

        Args:
            video_filename: file name relative to ``config['input_dir']``.

        Returns:
            dict with the keys 'normalized_video', 'keypoints_json' and
            'hf_ready' mapping to the produced paths.

        Raises:
            FileNotFoundError: if the input video does not exist.
        """
        print(f"Iniciando processamento do vídeo: {video_filename}")

        # 1. Video normalization
        print("1. Normalizando vídeo...")
        normalized_path = self.normalize_video(video_filename)

        # 2. Keypoint extraction with MediaPipe
        print("2. Extraindo keypoints...")
        keypoints_data = self.extract_keypoints(normalized_path)

        # 3. Persist keypoints as JSON
        print("3. Salvando keypoints...")
        json_path = self.save_keypoints(keypoints_data, video_filename)

        # 4. Stage files for Hugging Face
        print("4. Preparando para Hugging Face...")
        hf_ready_path = self.prepare_for_huggingface(
            normalized_path, json_path, video_filename
        )

        # 5. Optional upload.
        # BUG FIX: the sample config nests 'upload_to_hf' inside the
        # 'huggingface' sub-dict, while the original code only checked the
        # top level (so the flag could never enable the upload). Accept the
        # flag in either place; the top-level value wins when both exist.
        upload_enabled = self.config.get(
            'upload_to_hf',
            self.config.get('huggingface', {}).get('upload_to_hf', False),
        )
        if upload_enabled:
            print("5. Upload para Hugging Face...")
            self.upload_to_huggingface(hf_ready_path)

        print("Pipeline concluído com sucesso!")
        return {
            'normalized_video': normalized_path,
            'keypoints_json': json_path,
            'hf_ready': hf_ready_path
        }

    def normalize_video(self, video_filename):
        """Normalize the input video via :class:`VideoProcessor`.

        Returns the path of the normalized video produced by the processor.

        Raises:
            FileNotFoundError: if the source video is missing.
        """
        processor = VideoProcessor(self.config)
        input_path = os.path.join(self.config['input_dir'], video_filename)

        # Fail early with a clear message instead of a cryptic OpenCV error.
        if not os.path.exists(input_path):
            raise FileNotFoundError(f"Vídeo não encontrado: {input_path}")

        output_filename = f"normalized_{video_filename}"
        output_path = os.path.join(self.config['normalized_dir'], output_filename)

        return processor.normalize_video(input_path, output_path)

    def extract_keypoints(self, video_path):
        """Extract per-frame keypoints using MediaPipe.

        Returns whatever :meth:`MediaPipeProcessor.process_video` yields —
        expected to be a list of per-frame dicts (see ``save_keypoints``).
        """
        processor = MediaPipeProcessor(self.config)
        return processor.process_video(video_path)

    def save_keypoints(self, keypoints_data, original_filename):
        """Serialize keypoints to ``<name>_keypoints.json``.

        Args:
            keypoints_data: iterable of per-frame dicts; values may be numpy
                arrays (anything exposing ``tolist()``) or plain JSON types.
            original_filename: source video name used to derive the JSON name.

        Returns:
            Path of the written JSON file.
        """
        base_name = os.path.splitext(original_filename)[0]
        output_filename = f"{base_name}_keypoints.json"
        output_path = os.path.join(self.config['keypoints_dir'], output_filename)

        # Numpy arrays are not JSON-serializable; convert them to lists.
        serializable_data = [
            {
                key: value.tolist() if hasattr(value, 'tolist') else value
                for key, value in frame_data.items()
            }
            for frame_data in keypoints_data
        ]

        with open(output_path, 'w', encoding='utf-8') as f:
            json.dump(serializable_data, f, indent=2, ensure_ascii=False)

        return output_path

    def prepare_for_huggingface(self, video_path, json_path, original_filename):
        """Stage video, keypoints JSON, and metadata in one folder per video.

        Returns the staged directory path (input for the upload step).
        """
        import shutil  # local import kept: only needed for this staging step

        base_name = os.path.splitext(original_filename)[0]
        hf_dir = os.path.join(self.config['huggingface_dir'], base_name)
        os.makedirs(hf_dir, exist_ok=True)

        # Copy the normalized video (copy2 preserves timestamps/metadata).
        video_dest = os.path.join(hf_dir, f"{base_name}.mp4")
        if os.path.exists(video_path):
            shutil.copy2(video_path, video_dest)

        # Copy the keypoints JSON alongside the video.
        json_dest = os.path.join(hf_dir, f"{base_name}_keypoints.json")
        if os.path.exists(json_path):
            shutil.copy2(json_path, json_dest)

        # Write a metadata descriptor next to the artifacts.
        metadata = {
            'processing_date': datetime.now().isoformat(),
            'original_video': original_filename,
            'keypoints_format': 'MediaPipe Holistic',
            'frame_count': len(self.load_json(json_path)),
            'video_resolution': self.get_video_info(video_path)
        }

        metadata_path = os.path.join(hf_dir, 'metadata.json')
        with open(metadata_path, 'w', encoding='utf-8') as f:
            json.dump(metadata, f, indent=2, ensure_ascii=False)

        return hf_dir

    def upload_to_huggingface(self, directory_path):
        """Upload the staged directory to the configured Hugging Face repo."""
        uploader = HuggingFaceUploader(self.config)
        return uploader.upload_directory(directory_path)

    def load_json(self, json_path):
        """Load and return the contents of a JSON file."""
        with open(json_path, 'r', encoding='utf-8') as f:
            return json.load(f)

    def get_video_info(self, video_path):
        """Return ``{'width', 'height', 'fps'}`` read via OpenCV.

        NOTE: if OpenCV cannot open the file, the properties come back as
        zeros rather than raising — callers should treat 0 as "unknown".
        """
        import cv2  # local import kept: cv2 is only needed for this probe
        cap = cv2.VideoCapture(video_path)
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = cap.get(cv2.CAP_PROP_FPS)
        cap.release()
        return {'width': width, 'height': height, 'fps': fps}
147
+
148
# --- Pipeline configuration -------------------------------------------------
config = {
    # Directory layout; created automatically by the pipeline on startup.
    'input_dir': 'input',
    'normalized_dir': 'output/normalized',
    'keypoints_dir': 'output/keypoints',
    'huggingface_dir': 'output/huggingface',

    # BUG FIX: run_pipeline() reads 'upload_to_hf' from the TOP level of the
    # config (self.config.get('upload_to_hf', False)), but this flag was only
    # defined inside the 'huggingface' sub-dict below, so uploads could never
    # be enabled. The flag now lives where the code actually looks for it.
    'upload_to_hf': False,  # set True to enable the Hugging Face upload step

    'video_normalization': {
        'target_resolution': (640, 480),
        'target_fps': 30,
        'normalize_brightness': True,
        'enhance_contrast': True
    },

    'mediapipe_config': {
        'static_image_mode': False,
        'model_complexity': 1,
        'smooth_landmarks': True,
        'min_detection_confidence': 0.5,
        'min_tracking_confidence': 0.5
    },

    'huggingface': {
        'repo_id': 'your-username/your-repo-name',
        'token': 'your-hf-token',  # optional: only needed for automatic upload
        'upload_to_hf': False  # kept for backward compatibility with old configs
    }
}

if __name__ == "__main__":
    # Example usage: process one specific video and report the output paths.
    pipeline = LibrasProcessingPipeline(config)

    try:
        result = pipeline.run_pipeline("video_libras.mp4")
        print(f"Resultados salvos em: {result}")

    except Exception as e:
        print(f"Erro no processamento: {e}")
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ opencv-python==4.8.0.76
2
+ mediapipe==0.10.0
3
+ ffmpeg-python==0.2.0
4
+ numpy==1.24.3
5
+ tqdm==4.65.0
6
+ huggingface_hub==0.16.4
7
+ requests==2.28.2