import json
import gzip
import re
import difflib
import requests
import xml.etree.ElementTree as ET
import gradio as gr

# --- Sources ---
CHANNELS_URL = "https://raw.githubusercontent.com/pigzillaaa/daddylive/main/channels.json"
US_EPG_URL = "https://epgshare01.online/epgshare01/epg_ripper_US2.xml.gz"
US_LOCALS_URL = "https://epgshare01.online/epgshare01/epg_ripper_US_LOCALS2.xml.gz"
CA_EPG_URL = "https://epgshare01.online/epgshare01/epg_ripper_CA2.xml.gz"
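# Each EPG URL above points to a gzip-compressed XMLTV dump; load_epg_map() below
# downloads and decompresses them before parsing.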

# --- Manual Rules (learned fixes) ---
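# Exact channel names from channels.json mapped straight to epgshare channel ids.
# These overrides are checked first in find_best_match(), before any fuzzy matching.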
RULES = {
    "A&E USA": "A.and.E.HD.East.us2",
    "AMC USA": "AMC.HD.us2",
    "Adult Swim": "AdultSwim.com.Cartoon.Network.us2",
    "BBC America (BBCA)": "BBC.America.HD.us2",
    "BBC News Channel HD": "BBC.News.(North.America).HD.us2",
    "BET USA": "BET.HD.us2",
}

# --- Helpers ---
def normalize(s):
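    # Lowercase and strip everything but letters/digits so ids and names compare
    # loosely, e.g. normalize("BBC America (BBCA)") -> "bbcamericabbca".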
    return re.sub(r'[^a-z0-9]', '', s.lower())

def load_epg_map(url):
    # Download a gzipped XMLTV file and return a map of channel id -> channel id.
    # gzip.decompress() returns bytes (it is not a context manager), so the payload
    # is decompressed once and fed straight to the XML parser.
    r = requests.get(url, timeout=20)
    r.raise_for_status()
    xml_bytes = gzip.decompress(r.content)
    root = ET.fromstring(xml_bytes)
    return {ch.attrib.get("id", ""): ch.attrib.get("id", "") for ch in root.findall("channel")}

def build_epg():
    us = load_epg_map(US_EPG_URL)
    us_locals = load_epg_map(US_LOCALS_URL)
    ca = load_epg_map(CA_EPG_URL)
    # Merge maps
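    # Later maps win on duplicate ids: CA overrides US locals, which override US.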
    epg = {**us, **us_locals, **ca}
    return epg

def find_best_match(old_id, ch_name, epg_map):
    # Rule-based override first
    if ch_name in RULES:
        return RULES[ch_name]

    # Normalize input
    old_norm, name_norm = normalize(old_id), normalize(ch_name)
    norm_map = {normalize(k): v for k, v in epg_map.items() if k}

    # Try old_id
    if old_norm in norm_map:
        return norm_map[old_norm]
    # Try channel name
    if name_norm in norm_map:
        return norm_map[name_norm]

    # Fuzzy matching: take the closest epgshare key at or above a 0.8 similarity cutoff.
    matches = difflib.get_close_matches(name_norm, list(norm_map.keys()), n=1, cutoff=0.8)
    if matches:
        return norm_map[matches[0]]

    return None

# --- Main Update ---
def update_channels():
    r = requests.get(CHANNELS_URL, timeout=20)
    r.raise_for_status()
    data = r.json()

    epg_map = build_epg()

    log = []
    for ch_name, info in data.items():
        group = info.get("group_title", "")
        if group not in ["UNITED STATES", "CANADA"]:
            continue

        old_id = info.get("tvg_id", "")
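        # Look up a replacement id: manual rule first, then exact id/name match, then fuzzy.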
        new_id = find_best_match(old_id, ch_name, epg_map)

        if new_id:
            if new_id != old_id:
                info["tvg_id"] = new_id
                log.append(f"✅ {group} | {ch_name}: {old_id}{new_id}")
        else:
            log.append(f"⚠️ {group} | {ch_name} (no match, kept {old_id})")

    # Save updated JSON
    with open("channels_updated.json", "w", encoding="utf-8") as f:
        json.dump(data, f, indent=4, ensure_ascii=False)

    return "\n".join(log), "channels_updated.json"

# --- Gradio UI ---
with gr.Blocks() as demo:
    gr.Markdown("## Project 2: Update Channels JSON with New EPG IDs")
    run_btn = gr.Button("Run Update")
    log_out = gr.Textbox(label="Update Log", lines=25)
    file_out = gr.File(label="Download Updated JSON")

    run_btn.click(fn=update_channels, outputs=[log_out, file_out])

demo.launch()