ScriptSmith committed on
Commit
333630b
·
verified ·
1 Parent(s): 6423d43

Add files using upload-large-folder tool

Browse files
input/2024_video_ids.txt ADDED
The diff for this file is too large to render. See raw diff
 
input/2024_video_ids_nonblocked.txt ADDED
The diff for this file is too large to render. See raw diff
 
input/2024_video_ids_without_subtitles.txt ADDED
The diff for this file is too large to render. See raw diff
 
input/blocked.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ 374dC7UgPkU
2
+ YEqwd13B9Ac
3
+ KchyIfKx73c
4
+ kCaKn9DEjFc
5
+ 9h5BlrB7oTo
6
+ L8FqQrgx9Zc
7
+ Pk8f8RIXq9Y
input/channel_ids.txt ADDED
The diff for this file is too large to render. See raw diff
 
input/log_subs.txt ADDED
The diff for this file is too large to render. See raw diff
 
input/script.sh ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env bash
#
# Stream YouTube watch URLs for the video IDs listed in $1 into yt-dlp,
# which saves metadata (.info.json) and subtitles under data/ without
# downloading the videos. All yt-dlp output is appended to log.txt.
#
# Usage: script.sh <id_file> [start_id]
#   id_file  - file with one YouTube video ID per line
#   start_id - optional: skip IDs until this one is reached (resume support)

set -uo pipefail
# NOTE: deliberately no 'set -e' — a yt-dlp failure on one video must not
# abort the whole batch run.

if [ $# -lt 1 ]; then
  printf 'Usage: %s <id_file> [start_id]\n' "${0##*/}" >&2
  exit 2
fi

file="$1"
start_id="${2:-}"   # empty → process the list from the top
skip=true

mkdir -p data

# Emit one watch URL per ID that still needs metadata; yt-dlp consumes the
# URL list from stdin via '-a -'. Skip notices go to stderr so they are not
# mistaken for URLs by yt-dlp.
while IFS= read -r id || [ -n "$id" ]; do
  # Resume support: when a start ID is given, discard IDs until we reach it.
  if [ -n "$start_id" ] && [ "$skip" = true ]; then
    if [ "$id" = "$start_id" ]; then
      skip=false
    else
      continue
    fi
  else
    skip=false
  fi

  if [ ! -f "data/${id}.info.json" ]; then
    printf 'https://www.youtube.com/watch?v=%s\n' "$id"
  else
    echo "Skipping $id (metadata already downloaded)" >&2
  fi
done < "$file" | yt-dlp \
  --skip-download \
  --write-info-json \
  --write-subs \
  --write-auto-subs \
  --output "data/%(id)s" \
  --sleep-interval 5 \
  --sleep-requests 3 \
  -a - \
  2>&1 | tee -a log.txt
input/script_channels.sh ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env bash
#
# Fetch flat-playlist metadata for each YouTube channel ID listed in $1 and
# store it as channels/<id>.json. Each fetch writes through a temp file so an
# interrupted run never leaves a truncated .json behind.
#
# Usage: script_channels.sh <channel_id_file> [start_id]
#   channel_id_file - file with one channel ID (UC...) per line
#   start_id        - optional: skip IDs until this one is reached (resume)

set -euo pipefail

if [ $# -lt 1 ]; then
  printf 'Usage: %s <channel_id_file> [start_id]\n' "${0##*/}" >&2
  exit 2
fi

file="$1"
start_id="${2:-}"
skip=true

# Only one temp file is ever live at a time, so a scalar is enough for the
# cleanup trap. (The previous array-based version tried to drop entries with
# "${tmpfiles[@]/$tmpfile}", which only blanks the matching element via
# pattern substitution and leaves empty strings in the array.)
current_tmp=""

cleanup() {
  if [ -n "$current_tmp" ] && [ -f "$current_tmp" ]; then
    rm -f -- "$current_tmp"
  fi
}
trap 'cleanup; exit 130' INT
trap cleanup EXIT

mkdir -p channels

while IFS= read -r id || [ -n "$id" ]; do
  # Resume support: when a start ID is given, discard IDs until we reach it.
  if [ -n "$start_id" ] && [ "$skip" = true ]; then
    if [ "$id" = "$start_id" ]; then
      skip=false
    else
      continue
    fi
  fi

  # A channel's "uploads" playlist ID is the channel ID with its leading
  # "UC" replaced by "UU". Assumes IDs in the file start with "UC".
  playlist_id="UU${id:2}"

  outfile="channels/${id}.json"

  if [ -f "$outfile" ]; then
    echo "Skipping $id (already exists)" >&2
    continue
  fi

  echo "Fetching $id..."
  current_tmp="${outfile}.tmp.$$"

  if yt-dlp \
      -j \
      --flat-playlist \
      --lazy-playlist \
      --extractor-arg "youtubetab:approximate_date" \
      "https://www.youtube.com/playlist?list=$playlist_id" \
      > "$current_tmp"; then
    # Atomic promotion: readers never see a partially written file.
    mv -f -- "$current_tmp" "$outfile"
  else
    echo "Failed to fetch $id" >&2
    rm -f -- "$current_tmp"
  fi
  current_tmp=""
done < "$file"
input/script_subtitles.sh ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env bash
#
# Download subtitles (manual + auto-generated) for every video ID in $1 whose
# metadata (.info.json) already exists under data/ and which has no .vtt file
# yet. yt-dlp output is appended to log_subs.txt.
#
# Usage: script_subtitles.sh <id_file> [start_id]
#   id_file  - file with one YouTube video ID per line
#   start_id - optional: skip IDs until this one is reached (resume support)

set -uo pipefail
# NOTE: deliberately no 'set -e' — a yt-dlp failure on one video must not
# abort the whole batch run.

if [ $# -lt 1 ]; then
  printf 'Usage: %s <id_file> [start_id]\n' "${0##*/}" >&2
  exit 2
fi

file="$1"
start_id="${2:-}"   # empty → process the list from the top
skip=true

mkdir -p data

while IFS= read -r id || [ -n "$id" ]; do
  # Resume support: when a start ID is given, discard IDs until we reach it.
  if [ -n "$start_id" ] && [ "$skip" = true ]; then
    if [ "$id" = "$start_id" ]; then
      skip=false
    else
      continue
    fi
  else
    skip=false
  fi

  json="data/${id}.info.json"

  # Subtitles are fetched via --load-info-json, so the metadata must already
  # have been downloaded by script.sh.
  if [ ! -f "$json" ]; then
    echo "Skipping $id (no metadata found)" >&2
    continue
  fi

  # Skip if any subtitle file already exists for this ID. compgen -G tests
  # whether the glob matches anything without spawning ls.
  if compgen -G "data/${id}*.vtt" > /dev/null; then
    echo "Skipping $id (subtitles already downloaded)" >&2
    continue
  fi

  echo "Fetching subtitles for $id"

  yt-dlp \
    --skip-download \
    --write-subs \
    --write-auto-subs \
    --output "data/%(id)s" \
    --sleep-interval 5 \
    --sleep-requests 3 \
    --load-info-json "$json" \
    2>&1 | tee -a log_subs.txt

done < "$file"