|
|
import json |
|
|
import gzip |
|
|
import re |
|
|
import difflib |
|
|
import requests |
|
|
import xml.etree.ElementTree as ET |
|
|
import gradio as gr |
|
|
|
|
|
|
|
|
# Source channel list (JSON) maintained in the pigzillaaa/daddylive repo.
CHANNELS_URL = "https://raw.githubusercontent.com/pigzillaaa/daddylive/main/channels.json"


# Gzipped XMLTV EPG dumps from epgshare01, used to resolve tvg ids.
US_EPG_URL = "https://epgshare01.online/epgshare01/epg_ripper_US2.xml.gz"


# US local-affiliate channels (separate dump from the national US feed above).
US_LOCALS_URL = "https://epgshare01.online/epgshare01/epg_ripper_US_LOCALS2.xml.gz"


# Canadian channels.
CA_EPG_URL = "https://epgshare01.online/epgshare01/epg_ripper_CA2.xml.gz"
|
|
|
|
|
|
|
|
# Manual overrides: channel name (as it appears in channels.json) -> EPG id.
# Checked first in find_best_match, before any normalized or fuzzy matching,
# for channels whose names are too different from their EPG ids to match
# automatically.
RULES = {


    "A&E USA": "A.and.E.HD.East.us2",


    "AMC USA": "AMC.HD.us2",


    "Adult Swim": "AdultSwim.com.Cartoon.Network.us2",


    "BBC America (BBCA)": "BBC.America.HD.us2",


    "BBC News Channel HD": "BBC.News.(North.America).HD.us2",


    "BET USA": "BET.HD.us2",


}
|
|
|
|
|
|
|
|
def normalize(s):
    """Lowercase *s* and drop every character outside a-z / 0-9.

    Used to make channel names and EPG ids comparable regardless of
    punctuation, spacing, and case.
    """
    lowered = s.lower()
    return "".join(ch for ch in lowered if ch.isascii() and ch.isalnum())
|
|
|
|
|
def load_epg_map(url):
    """Download a gzipped XMLTV file and return its channel ids.

    Args:
        url: HTTP(S) URL of a .xml.gz XMLTV dump.

    Returns:
        dict mapping each <channel> element's id attribute to itself.
        (Identity mapping: downstream code only needs the set of valid ids,
        but a dict keeps the k -> v lookup shape used by find_best_match.)

    Raises:
        requests.RequestException: on network/HTTP failure.
        ET.ParseError: if the decompressed payload is not valid XML.
    """
    # The original wrapped gzip.decompress(...) in a `with` statement; bytes
    # is not a context manager, so that always raised and only "worked" via a
    # broad `except` that re-ran the decompression — and masked real network
    # errors with a NameError on `r`. Straight-line code instead.
    r = requests.get(url, timeout=20)
    r.raise_for_status()
    xml_bytes = gzip.decompress(r.content)
    root = ET.fromstring(xml_bytes)
    return {ch.attrib.get("id", ""): ch.attrib.get("id", "") for ch in root.findall("channel")}
|
|
|
|
|
def build_epg():
    """Merge the US, US-locals, and CA EPG channel maps into a single dict.

    Later sources win on duplicate ids (US < US locals < CA), matching the
    original merge order.
    """
    merged = {}
    for source_url in (US_EPG_URL, US_LOCALS_URL, CA_EPG_URL):
        merged.update(load_epg_map(source_url))
    return merged
|
|
|
|
|
def find_best_match(old_id, ch_name, epg_map):
    """Resolve the best EPG id for a channel.

    Resolution order:
      1. manual override from RULES (keyed by channel name),
      2. exact match on the normalized old tvg id,
      3. exact match on the normalized channel name,
      4. fuzzy match (difflib, cutoff 0.8) against all normalized EPG keys.

    Args:
        old_id: the channel's current tvg id (may be empty).
        ch_name: the channel's display name.
        epg_map: mapping of EPG channel key -> EPG id.

    Returns:
        The matched EPG id, or None when nothing qualifies.
    """
    if ch_name in RULES:
        return RULES[ch_name]

    # Index the EPG by normalized key so punctuation/case differences vanish.
    lookup = {normalize(key): value for key, value in epg_map.items() if key}

    # Try the old id first, then the display name — same order as before.
    for probe in (normalize(old_id), normalize(ch_name)):
        if probe in lookup:
            return lookup[probe]

    close = difflib.get_close_matches(normalize(ch_name), list(lookup), n=1, cutoff=0.8)
    if close:
        return lookup[close[0]]

    return None
|
|
|
|
|
|
|
|
def update_channels():
    """Refresh tvg_id values in the channels JSON using current EPG data.

    Downloads the channel list, builds the merged US/CA EPG map, and rewrites
    the tvg_id of every channel in the UNITED STATES or CANADA groups to the
    best matching EPG id. Channels in other groups are left untouched, as are
    channels whose id already matches. Writes the result to
    channels_updated.json.

    Returns:
        tuple[str, str]: (newline-joined log of changes/misses, output path).

    Raises:
        requests.RequestException: on network/HTTP failure fetching the list.
    """
    # timeout + raise_for_status: the original had neither, so it could hang
    # indefinitely and would try to JSON-parse an HTML error page on HTTP
    # failure, producing a misleading decode error.
    r = requests.get(CHANNELS_URL, timeout=20)
    r.raise_for_status()
    data = r.json()

    epg_map = build_epg()

    log = []
    for ch_name, info in data.items():
        group = info.get("group_title", "")
        if group not in ("UNITED STATES", "CANADA"):
            continue

        old_id = info.get("tvg_id", "")
        new_id = find_best_match(old_id, ch_name, epg_map)

        if new_id:
            # Only log real changes; an already-correct id stays silent.
            if new_id != old_id:
                info["tvg_id"] = new_id
                log.append(f"✅ {group} | {ch_name} → {new_id}")
        else:
            log.append(f"⚠️ {group} | {ch_name} (no match, kept {old_id})")

    # Explicit encoding so the output file is stable across platforms.
    with open("channels_updated.json", "w", encoding="utf-8") as f:
        json.dump(data, f, indent=4)

    return "\n".join(log), "channels_updated.json"
|
|
|
|
|
|
|
|
# Gradio UI: a single button that runs the update and shows the log plus a
# download link for the rewritten JSON.
with gr.Blocks() as demo:


    gr.Markdown("## Project 2: Update Channels JSON with New EPG IDs")


    run_btn = gr.Button("Run Update")


    # update_channels returns (log_text, file_path) — wired to these two outputs.
    log_out = gr.Textbox(label="Update Log", lines=25)


    file_out = gr.File(label="Download Updated JSON")




    run_btn.click(fn=update_channels, outputs=[log_out, file_out])


# Launches the web app (blocks until the server is stopped).
demo.launch()
|
|
|