tcm03 committed on
Commit
597f824
·
1 Parent(s): 5a1acba

Restructure repo and refine annotation

Browse files
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50) hide show
  1. .gitignore +3 -1
  2. annotate.py +0 -90
  3. annotation/__pycache__/annotate.cpython-310.pyc +0 -0
  4. annotation/__pycache__/datatypes.cpython-310.pyc +0 -0
  5. annotation/__pycache__/utils.cpython-310.pyc +0 -0
  6. annotation/annotate.py +54 -0
  7. annotation/datatypes.py +4 -0
  8. annotation/train_test.py +82 -0
  9. annotation/utils.py +62 -0
  10. {0 β†’ data/0}/0IB8fKoY-eo.mp4 +0 -0
  11. {0 β†’ data/0}/0f6p3EUUgE8.mp4 +0 -0
  12. {0 β†’ data/0}/0n_4y7tHVoo.mp4 +0 -0
  13. {0 β†’ data/0}/0yP5daOvO1I.mp4 +0 -0
  14. {0 β†’ data/0}/4TOR13Q3b8U.mp4 +0 -0
  15. {0 β†’ data/0}/4Xy0QA91XVc.mp4 +0 -0
  16. {0 β†’ data/0}/4tnUeJ1EZ2g.mp4 +0 -0
  17. {0 β†’ data/0}/4u0HBLuD_w8.mp4 +0 -0
  18. {0 β†’ data/0}/5SO7dnP6wQw.mp4 +0 -0
  19. {0 β†’ data/0}/647IkQZq5ag.mp4 +0 -0
  20. {0 β†’ data/0}/69kJ6oCUq5Q.mp4 +0 -0
  21. {0 β†’ data/0}/6q_sAjeaJ6g.mp4 +0 -0
  22. {0 β†’ data/0}/7z4wl_wEtsk.mp4 +0 -0
  23. {0 β†’ data/0}/8KNAJkQUsqE.mp4 +0 -0
  24. {0 β†’ data/0}/8VNwSM4VZkU.mp4 +0 -0
  25. {0 β†’ data/0}/8XTseevKCtg.mp4 +0 -0
  26. {0 β†’ data/0}/8aqzHOD1GCI.mp4 +0 -0
  27. {0 β†’ data/0}/8pSwuDE12kE.mp4 +0 -0
  28. {0 β†’ data/0}/8pWOQ5s0drU.mp4 +0 -0
  29. {0 β†’ data/0}/8qYRPfCnS54.mp4 +0 -0
  30. {0 β†’ data/0}/91YfaydaRh0.mp4 +0 -0
  31. {0 β†’ data/0}/9Ty3-VnMWVY.mp4 +0 -0
  32. {0 β†’ data/0}/9kmzkIC8vcc.mp4 +0 -0
  33. {0 β†’ data/0}/9ofnmfKMO7E.mp4 +0 -0
  34. {0 β†’ data/0}/9ySy7KKeNwE.mp4 +0 -0
  35. {0 β†’ data/0}/ABh_KPtPw2E.mp4 +0 -0
  36. {0 β†’ data/0}/AJ1JUYqdO38.mp4 +0 -0
  37. {0 β†’ data/0}/ALSWCInMms4.mp4 +0 -0
  38. {0 β†’ data/0}/AN1xz5fK6S8.mp4 +0 -0
  39. {0 β†’ data/0}/B3MQKYdzjhE.mp4 +0 -0
  40. {0 β†’ data/0}/B6vDzVrBcTs.mp4 +0 -0
  41. {0 β†’ data/0}/B9Laugq_Q9I.mp4 +0 -0
  42. {0 β†’ data/0}/BV3rYuTZx6E.mp4 +0 -0
  43. {0 β†’ data/0}/BW0V-_Et3vA.mp4 +0 -0
  44. {0 β†’ data/0}/BsS0KTvXQCM.mp4 +0 -0
  45. {0 β†’ data/0}/BvpSP2BRR60.mp4 +0 -0
  46. {0 β†’ data/0}/CHTIo_RA8p0.mp4 +0 -0
  47. {0 β†’ data/0}/CTZrrgUnhUo.mp4 +0 -0
  48. {0 β†’ data/0}/DDZ88CQEGZE.mp4 +0 -0
  49. {0 β†’ data/0}/DDo1aCul9Wc.mp4 +0 -0
  50. {0 β†’ data/0}/DOexTV2xoH4.mp4 +0 -0
.gitignore CHANGED
@@ -1,2 +1,4 @@
1
  preprocessing/__pycache__/
2
- preprocessing/vision_encoders/__pycache__/
 
 
 
1
  preprocessing/__pycache__/
2
+ preprocessing/vision_encoders/__pycache__/
3
+ annotations/__pycache__/
4
+ .vscode/
annotate.py DELETED
@@ -1,90 +0,0 @@
1
"""Annotate a video dataset as conversation-style JSON records.

Each folder name carries a numeric engagement label (0/1/2); every video file
inside it becomes one annotation record.
"""
import argparse
import json
import os
from typing import Any, Callable, Dict, List, Optional, Union

# One video's annotation record, or None when the video was filtered out.
# NOTE: `Optional` was previously used without being imported (NameError).
VideoAnnotation = Optional[Dict[str, Union[str, List[Dict[str, str]]]]]


def extract_label(path: str) -> str:
    """Return the right-most numeric character in *path* (the label), or '-1'."""
    for char in reversed(path):
        if char.isnumeric():
            return char
    return '-1'


def get_duration(path: str) -> int:
    """Return the video duration in whole seconds (frame count / average FPS)."""
    # Local import keeps the module importable when decord is not installed.
    import decord as de
    vr = de.VideoReader(path, ctx=de.cpu(0), num_threads=1)
    return int(len(vr) / vr.get_avg_fps())


def filter_video(path: str, **kwargs) -> bool:
    """Keep the video unless it exceeds an optional 'max_duration' (seconds).

    argparse passes max_duration=None when the flag is absent; treat that the
    same as "no limit" (the old `'max_duration' in kwargs` test compared
    against None and raised TypeError).
    """
    max_duration = kwargs.get('max_duration')
    if max_duration is not None:
        return get_duration(path) <= max_duration
    return True


def annotate_video(
    file_path: str,
    label: str,
    video_filter: Callable[..., bool] = lambda path, **kwargs: True,
    **kwargs,
) -> VideoAnnotation:
    """Build the conversation-style annotation dict for one video.

    Args:
        file_path: path of the video file.
        label: engagement label ('0', '1', or '2') taken from the folder name.
        video_filter: predicate deciding whether the video is kept; the
            default accepts everything (and tolerates forwarded kwargs).
        **kwargs: forwarded to *video_filter* (e.g. max_duration).

    Returns:
        The annotation record, or None when *video_filter* rejects the video.
    """
    # The previous version referenced undefined names `video_filter` and
    # `label` here; both are now explicit parameters.
    if not video_filter(file_path, **kwargs):
        return None
    json_content: VideoAnnotation = {
        'video': file_path,
        'label': label,
        'conversations': [
            {
                'from': 'human',
                'value': '<image>\nThis video is a Youtube video on one of the following categories: Education, Film & Animation, Comedy, Entertainment, Music, Howto & Style, and People & Blogs. The engagement rate defined for each such video is based on the number of potential likes and dislikes only when published on Youtube. The exact formula for the score is (likes-dislikes) / (likes+dislikes) and the final prediction label is either 0 (not engaged), 1 (neutral), or 2 (engaged) based on thresholding this score. Please predict one of the three labels for this video, based on its contents only.'
            },
            {
                'from': 'gpt',
                'value': f'The engagement label of the video is {label}.'
            }
        ]
    }
    return json_content


def dump_json(
    folder_paths: List[str],
    video_filter: Callable[..., bool] = lambda path, **kwargs: True,
    **kwargs,
) -> List[VideoAnnotation]:
    """Annotate every file under each folder; the folder name carries the label.

    Raises:
        ValueError: when a folder path contains no numeric label.
    """
    json_contents: List[VideoAnnotation] = []

    for folder_path in folder_paths:
        label: str = extract_label(folder_path)
        if label == '-1':
            # raise instead of assert: asserts are stripped under `python -O`
            raise ValueError(f"Invalid folder path: {folder_path}")
        for file_name in os.listdir(folder_path):
            # os.path.join is portable on both Windows and Linux; plain string
            # concatenation silently broke when the path lacked a separator.
            file_path: str = os.path.join(folder_path, file_name)
            json_content = annotate_video(file_path, label, video_filter, **kwargs)
            if json_content is not None:
                json_contents.append(json_content)

    return json_contents


if __name__ == "__main__":

    parser = argparse.ArgumentParser(
        prog='annotate',
        description='Annotate video dataset with JSON format'
    )
    parser.add_argument('--output_file', type=str, default='EnTube.json', help='Output JSON file')
    parser.add_argument('--max_duration', type=int, help='Maximum duration of video in seconds')
    args = parser.parse_args()

    folder_paths: List[str] = ['0/', '1/', '2/']

    json_contents: List[VideoAnnotation] = dump_json(folder_paths, filter_video, **vars(args))

    with open(args.output_file, 'w') as f:
        json.dump(json_contents, f, indent=4)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
annotation/__pycache__/annotate.cpython-310.pyc ADDED
Binary file (2.23 kB). View file
 
annotation/__pycache__/datatypes.cpython-310.pyc ADDED
Binary file (346 Bytes). View file
 
annotation/__pycache__/utils.cpython-310.pyc ADDED
Binary file (2.3 kB). View file
 
annotation/annotate.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+ from typing import List, Union, Dict, Any, Callable, Optional
4
+ from concurrent.futures import ThreadPoolExecutor, as_completed
5
+ from datatypes import VideoAnnotation, Metadata
6
+ from utils import get_optimal_workers, extract_label, convert_to_linux_path
7
+
8
+
9
def annotate_video(
    file_path: str,
    label: str,
    video_filter: Callable[[str, Any], bool] = lambda path: True,
    **kwargs
) -> VideoAnnotation:
    """Return the conversation-style annotation record for one video.

    The record pairs a fixed human prompt with the ground-truth engagement
    label as the assistant turn. Returns None when *video_filter* rejects
    the video (e.g. it is too long).
    """
    if not video_filter(file_path, **kwargs):
        return None
    # print(f'Begin annotating {file_path}...')
    human_turn = {
        'from': 'human',
        'value': '<image>\nThis video is a Youtube video on one of many categories such as Education, Film & Animation, Comedy, Entertainment, Music, Howto & Style, and People & Blogs, etc. The engagement rate defined for each such video is based on the number of potential likes and dislikes only when published on Youtube. The higher number of likes and lower number of dislikes, the more engaged the video is. The final prediction label is either 0 (not engaged), 1 (neutral), or 2 (engaged). Please predict one of the three labels for this video, based on its contents only.'
    }
    gpt_turn = {
        'from': 'gpt',
        'value': f'The engagement label of the video is {label}.'
    }
    annotation: VideoAnnotation = {
        'video': convert_to_linux_path(file_path),  # stored paths use forward slashes
        'label': label,
        'conversations': [human_turn, gpt_turn],
    }
    return annotation
33
+
34
+
35
+
36
def dump_json(
    metadata: Metadata,
    video_filter: Callable[[str, Any], bool] = lambda path: True,
    **kwargs
) -> List[VideoAnnotation]:
    """Annotate every (file_path, label) pair in *metadata* concurrently.

    One worker thread per available core (minus one) runs annotate_video;
    videos rejected by *video_filter* are dropped from the result.
    """
    print(f'Annotating {len(metadata)} videos...')

    with ThreadPoolExecutor(max_workers=get_optimal_workers()) as executor:
        futures = [
            executor.submit(annotate_video, file_path, label,
                            video_filter=video_filter, **kwargs)
            for file_path, label in metadata
        ]
        # Gather in completion order; filtered-out videos come back as None.
        json_contents: List[VideoAnnotation] = [
            annotation
            for annotation in (future.result() for future in as_completed(futures))
            if annotation
        ]

    return json_contents
annotation/datatypes.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
from typing import List, Union, Dict, Tuple, Callable, Optional

# One video's annotation record: {'video': path, 'label': label,
# 'conversations': [...]} — or None when the video was filtered out.
VideoAnnotation = Optional[Dict[str, Union[str, List[Dict[str, str]]]]]
# (file_path, label) pairs describing the dataset on disk.
Metadata = List[Tuple[str, str]]
annotation/train_test.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Split the annotated video dataset into train/test JSON files.

The split is stratified by engagement label so both splits keep the
original label distribution.
"""
import json
import os
import argparse
from sklearn.model_selection import train_test_split
from datatypes import VideoAnnotation, Metadata
from annotate import dump_json
from utils import get_metadata, filter_video
from typing import List


def _save_json(path: str, contents: List[VideoAnnotation]) -> None:
    """Write *contents* to *path*, creating the parent directory if needed.

    The default outputs live under 'data/', which previously caused a
    FileNotFoundError when that directory did not exist yet.
    """
    parent = os.path.dirname(path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    with open(path, 'w') as f:
        json.dump(contents, f, indent=4)


if __name__ == "__main__":

    parser = argparse.ArgumentParser(
        prog = 'train_test.py',
        description='Annotate video dataset with JSON format'
    )
    parser.add_argument(
        '--folders',
        type = str,
        nargs = '+',
        required = True,
        help = "List of folder paths to video data"
    )
    parser.add_argument(
        '--train_size',
        type=float,
        default=0.8,
        help='Proportion of the dataset for training'
    )
    parser.add_argument(
        '--output_train_file',
        type=str,
        default='data/EnTube_train.json',
        help='Output JSON file for training'
    )
    parser.add_argument(
        '--output_test_file',
        type=str,
        default='data/EnTube_test.json',
        help='Output JSON file for testing'
    )
    parser.add_argument(
        '--max_duration',
        type=int,
        help='Maximum duration of video in seconds'
    )
    parser.add_argument(
        '--random_state',
        type=int,
        default=42,
        help='Random seed for train-test split'
    )
    args = parser.parse_args()

    folder_paths: List[str] = args.folders
    metadata: Metadata = get_metadata(folder_paths)
    # Stratify manually: split each label's videos separately so both
    # splits preserve the per-label proportions.
    metadata_label = {0: [], 1: [], 2: []}
    for video, label in metadata:
        metadata_label[int(label)].append((video, label))
    train = []
    test = []
    for label, videos in metadata_label.items():
        train_l, test_l = train_test_split(
            videos,
            train_size=args.train_size,
            random_state=args.random_state
        )
        print(f'Label {label}: {len(train_l)} training videos, {len(test_l)} testing videos')
        train.extend(train_l)
        test.extend(test_l)

    # vars(args) forwards max_duration (among others) down to filter_video,
    # which ignores unknown keys.
    json_train: List[VideoAnnotation] = dump_json(train, filter_video, **vars(args))
    json_test: List[VideoAnnotation] = dump_json(test, filter_video, **vars(args))

    _save_json(args.output_train_file, json_train)
    print(f"Training data saved to {args.output_train_file}")
    _save_json(args.output_test_file, json_test)
    print(f"Testing data saved to {args.output_test_file}")
annotation/utils.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import decord as de
2
+ from datatypes import Metadata
3
+ from typing import List
4
+ import os
5
+ from multiprocessing import cpu_count
6
+ import traceback
7
+ from pathlib import Path
8
+
9
def convert_to_linux_path(path: str) -> str:
    """Normalise *path* to forward-slash (POSIX) form.

    PureWindowsPath treats both '\\' and '/' as separators, so paths built
    with os.path.join on Windows are converted regardless of the host OS.
    (Path(path).as_posix() left backslashes untouched when run on Linux.)
    """
    from pathlib import PureWindowsPath
    return PureWindowsPath(path).as_posix()
11
+
12
def extract_label(path: str) -> str:
    """Return the right-most numeric character in *path*, or '-1' if none.

    Folder names encode the class label as a digit (e.g. 'data/0/' -> '0').
    """
    for char in reversed(path):
        if char.isnumeric():
            return char
    return '-1'
19
+
20
def get_duration(path: str) -> int:
    """Best-effort duration of the video at *path*, in whole seconds.

    Duration is frame count divided by average FPS. Returns -1 when the
    file cannot be read (corrupt/missing video) so callers can skip it.
    """
    try:
        reader = de.VideoReader(path, ctx=de.cpu(0), num_threads=1)
        return int(len(reader) / reader.get_avg_fps())
    except Exception as exc:
        print(f"Error reading video {path}: {exc}")
        # Full traceback helps diagnose which files decord chokes on.
        print(traceback.format_exc())
        return -1
28
+
29
def filter_video(path: str, **kwargs) -> bool:
    """Return True when the video passes the optional 'max_duration' check.

    With no 'max_duration' kwarg (or None) every video passes; unreadable
    videos (duration -1) and any unexpected error are treated as rejects.
    """
    try:
        max_duration = kwargs.get('max_duration', None)
        if max_duration is None:
            return True
        duration = get_duration(path)
        if duration == -1:
            # get_duration signals unreadable files with -1.
            print(f"Skipping invalid video: {path}")
            return False
        return duration <= max_duration
    except Exception as e:
        print(f"Error in filter_video for {path}: {e}")
        return False
42
+
43
def get_optimal_workers() -> int:
    """Number of worker threads to use: all CPU cores but one, at least one."""
    try:
        workers = cpu_count() - 1  # leave one core free for the main thread
    except (NotImplementedError, ValueError):
        # cpu_count can fail on exotic platforms; degrade to serial work.
        return 1
    return max(1, workers)
49
+
50
def get_metadata(
    folder_paths: List[str]
) -> Metadata:
    """Collect (file_path, label) pairs for every file under *folder_paths*.

    Each folder name must contain a digit identifying its class label
    (see extract_label).

    Raises:
        ValueError: when a folder path carries no numeric label.
    """
    metadata: Metadata = []
    for folder_path in folder_paths:
        label: str = extract_label(folder_path)
        if label == '-1':
            # raise instead of assert: asserts are stripped under `python -O`,
            # which would silently admit mislabeled folders.
            raise ValueError(f"Invalid folder path: {folder_path}")
        for file_name in os.listdir(folder_path):
            file_path: str = os.path.join(folder_path.rstrip('/'), file_name)
            # isfile() already implies existence, so the extra exists() check
            # was redundant; directories and dangling names are skipped.
            if os.path.isfile(file_path):
                metadata.append((file_path, label))
    print(f'Found {len(metadata)} videos')
    return metadata
{0 β†’ data/0}/0IB8fKoY-eo.mp4 RENAMED
File without changes
{0 β†’ data/0}/0f6p3EUUgE8.mp4 RENAMED
File without changes
{0 β†’ data/0}/0n_4y7tHVoo.mp4 RENAMED
File without changes
{0 β†’ data/0}/0yP5daOvO1I.mp4 RENAMED
File without changes
{0 β†’ data/0}/4TOR13Q3b8U.mp4 RENAMED
File without changes
{0 β†’ data/0}/4Xy0QA91XVc.mp4 RENAMED
File without changes
{0 β†’ data/0}/4tnUeJ1EZ2g.mp4 RENAMED
File without changes
{0 β†’ data/0}/4u0HBLuD_w8.mp4 RENAMED
File without changes
{0 β†’ data/0}/5SO7dnP6wQw.mp4 RENAMED
File without changes
{0 β†’ data/0}/647IkQZq5ag.mp4 RENAMED
File without changes
{0 β†’ data/0}/69kJ6oCUq5Q.mp4 RENAMED
File without changes
{0 β†’ data/0}/6q_sAjeaJ6g.mp4 RENAMED
File without changes
{0 β†’ data/0}/7z4wl_wEtsk.mp4 RENAMED
File without changes
{0 β†’ data/0}/8KNAJkQUsqE.mp4 RENAMED
File without changes
{0 β†’ data/0}/8VNwSM4VZkU.mp4 RENAMED
File without changes
{0 β†’ data/0}/8XTseevKCtg.mp4 RENAMED
File without changes
{0 β†’ data/0}/8aqzHOD1GCI.mp4 RENAMED
File without changes
{0 β†’ data/0}/8pSwuDE12kE.mp4 RENAMED
File without changes
{0 β†’ data/0}/8pWOQ5s0drU.mp4 RENAMED
File without changes
{0 β†’ data/0}/8qYRPfCnS54.mp4 RENAMED
File without changes
{0 β†’ data/0}/91YfaydaRh0.mp4 RENAMED
File without changes
{0 β†’ data/0}/9Ty3-VnMWVY.mp4 RENAMED
File without changes
{0 β†’ data/0}/9kmzkIC8vcc.mp4 RENAMED
File without changes
{0 β†’ data/0}/9ofnmfKMO7E.mp4 RENAMED
File without changes
{0 β†’ data/0}/9ySy7KKeNwE.mp4 RENAMED
File without changes
{0 β†’ data/0}/ABh_KPtPw2E.mp4 RENAMED
File without changes
{0 β†’ data/0}/AJ1JUYqdO38.mp4 RENAMED
File without changes
{0 β†’ data/0}/ALSWCInMms4.mp4 RENAMED
File without changes
{0 β†’ data/0}/AN1xz5fK6S8.mp4 RENAMED
File without changes
{0 β†’ data/0}/B3MQKYdzjhE.mp4 RENAMED
File without changes
{0 β†’ data/0}/B6vDzVrBcTs.mp4 RENAMED
File without changes
{0 β†’ data/0}/B9Laugq_Q9I.mp4 RENAMED
File without changes
{0 β†’ data/0}/BV3rYuTZx6E.mp4 RENAMED
File without changes
{0 β†’ data/0}/BW0V-_Et3vA.mp4 RENAMED
File without changes
{0 β†’ data/0}/BsS0KTvXQCM.mp4 RENAMED
File without changes
{0 β†’ data/0}/BvpSP2BRR60.mp4 RENAMED
File without changes
{0 β†’ data/0}/CHTIo_RA8p0.mp4 RENAMED
File without changes
{0 β†’ data/0}/CTZrrgUnhUo.mp4 RENAMED
File without changes
{0 β†’ data/0}/DDZ88CQEGZE.mp4 RENAMED
File without changes
{0 β†’ data/0}/DDo1aCul9Wc.mp4 RENAMED
File without changes
{0 β†’ data/0}/DOexTV2xoH4.mp4 RENAMED
File without changes