File size: 12,727 Bytes
4f9adc4
 
 
bb3e8ea
 
 
 
4f9adc4
 
 
 
 
 
 
 
 
 
 
 
 
 
bb3e8ea
 
 
 
 
4f9adc4
 
 
 
 
 
 
bb3e8ea
 
 
4f9adc4
bb3e8ea
4f9adc4
bb3e8ea
4f9adc4
 
bb3e8ea
4f9adc4
 
bb3e8ea
4f9adc4
bb3e8ea
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4f9adc4
 
 
 
 
 
 
 
bb3e8ea
4f9adc4
 
 
 
 
 
 
 
 
 
 
 
bb3e8ea
 
4f9adc4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bb3e8ea
4f9adc4
 
 
 
 
bb3e8ea
4f9adc4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bb3e8ea
 
 
 
 
 
 
4f9adc4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bb3e8ea
 
 
 
 
 
4f9adc4
 
 
bb3e8ea
 
4f9adc4
 
 
 
 
 
bb3e8ea
 
4f9adc4
bb3e8ea
 
4f9adc4
bb3e8ea
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4f9adc4
 
 
 
bb3e8ea
4f9adc4
bb3e8ea
 
 
 
 
 
 
4f9adc4
bb3e8ea
4f9adc4
 
 
 
bb3e8ea
 
4f9adc4
 
bb3e8ea
4f9adc4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bb3e8ea
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
"""
Web scraper for finding college football games on nfl-video.com.

This module uses team-specific pages for efficient searching:
1. Builds an index of team names to their dedicated pages
2. Fetches games directly from the team's page
3. Optionally filters by opponent (team_b)
"""

import logging
import re
import time
from typing import Optional

import requests
from bs4 import BeautifulSoup

from .models import GameResult, SearchResults

logger = logging.getLogger(__name__)

# Base URL for college football section
BASE_URL = "https://nfl-video.com/cfb"

# Main page with team links in sidebar
MAIN_PAGE_URL = "https://nfl-video.com/cfb/ncaa_college_football_highlights_games_replay/ncaa_college_football_full_game_replays/2"

# User agent to avoid being blocked
HEADERS = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"}

# Delay between page requests to be polite to the server
REQUEST_DELAY_SECONDS = 0.5

# Cache for team index (team name -> URL mapping)
_team_index_cache: Optional[dict[str, str]] = None  # pylint: disable=invalid-name


def _fetch_page(url: str) -> Optional[BeautifulSoup]:
    """
    Download a URL and parse the response body as HTML.

    Args:
        url: Address of the page to download

    Returns:
        Parsed BeautifulSoup document, or None when the request fails
    """
    try:
        logger.debug("Fetching URL: %s", url)
        resp = requests.get(url, headers=HEADERS, timeout=30)
        resp.raise_for_status()
    except requests.RequestException as e:
        logger.error("Failed to fetch %s: %s", url, e)
        return None
    return BeautifulSoup(resp.text, "html.parser")


def _build_team_index() -> dict[str, str]:
    """
    Create (and cache) a mapping of team names to their dedicated page URLs.

    Scrapes the anchors on the main page sidebar; links matching the
    team-page URL pattern are stored keyed by their lowercased link text.

    Returns:
        Dictionary mapping team names (lowercase) to full URLs
    """
    global _team_index_cache  # pylint: disable=global-statement

    if _team_index_cache is not None:
        logger.debug("Using cached team index with %d teams", len(_team_index_cache))
        return _team_index_cache

    logger.info("Building team index from main page...")
    soup = _fetch_page(MAIN_PAGE_URL)
    if soup is None:
        logger.error("Failed to fetch main page for team index")
        return {}

    index: dict[str, str] = {}

    for anchor in soup.find_all("a", href=True):
        href = anchor["href"]
        label = anchor.get_text(strip=True)

        # Team pages follow a pattern like /cfb/sec_football/alabama_crimson_tide_football/59
        if not label or "/cfb/" not in href or "_football/" not in href:
            continue
        # Skip the main category / replay listing links
        if "ncaa_college_football" in href or "replay" in href.lower():
            continue

        # Normalize to an absolute URL
        if not href.startswith("http"):
            href = f"https://nfl-video.com{href}"

        # Lowercase key for case-insensitive lookup; first occurrence wins
        key = label.lower().strip()
        if key and key not in index:
            index[key] = href
            logger.debug("Indexed team: %s -> %s", label, href)

    logger.info("Built team index with %d teams", len(index))
    _team_index_cache = index
    return index


def get_team_index() -> dict[str, str]:
    """
    Return the team index (public wrapper, useful for debugging/listing teams).

    Returns:
        Dictionary mapping team names to their page URLs
    """
    return _build_team_index()


def clear_team_index_cache() -> None:
    """Reset the module-level team index cache (primarily for tests)."""
    global _team_index_cache  # pylint: disable=global-statement
    _team_index_cache = None
    logger.debug("Cleared team index cache")


def _find_team_url(team_name: str) -> Optional[str]:
    """
    Look up the dedicated page URL for a team.

    Matching is case-insensitive, with a substring fallback so that
    variations are tolerated:
    - "Ohio State" matches "ohio state"
    - "Texas A&M" matches "texas a&m"
    - Abbreviations like "OSU" will not match (would need alias support)

    Args:
        team_name: Team name to search for

    Returns:
        Team page URL or None if not found
    """
    index = _build_team_index()
    needle = team_name.lower().strip()

    # Fast path: exact key hit
    url = index.get(needle)
    if url is not None:
        return url

    # Fallback: substring containment in either direction
    for indexed_name, candidate in index.items():
        if needle in indexed_name or indexed_name in needle:
            logger.debug("Fuzzy matched '%s' to '%s'", team_name, indexed_name)
            return candidate

    logger.warning("Team not found in index: %s", team_name)
    return None


def _parse_game_title(title: str) -> dict:
    """
    Parse a game title to extract team names, year, and event.

    Example titles:
        "Ohio State vs Oregon Football 2024 Big Ten Championship Full Game Replay"
        "Alabama vs. Oklahoma Football December 19, 2025 CFP First Round Full Game Replay"

    Args:
        title: Full game title string

    Returns:
        Dictionary with keys: team_a, team_b, year, event (some may be None)
    """
    result = {"team_a": None, "team_b": None, "year": None, "event": None}

    # Clean up the title
    title = title.strip()

    # Pattern: "Team A vs Team B Football [Date] [Event] Full Game Replay"
    # Handle both "vs" and "vs." (with period)
    vs_match = re.match(r"^(.+?)\s+vs\.?\s+(.+?)\s+Football\s+", title, re.IGNORECASE)
    if vs_match:
        result["team_a"] = vs_match.group(1).strip()
        result["team_b"] = vs_match.group(2).strip()

    # Extract year (4-digit number)
    year_match = re.search(r"\b(20\d{2})\b", title)
    if year_match:
        result["year"] = int(year_match.group(1))

    # Extract event - everything between the date and "Full Game Replay"
    # Format: "Football [Month Day, Year] [Event] Full Game Replay"
    event_match = re.search(r"Football\s+\w+\s+\d{1,2},?\s+20\d{2}\s+(.+?)\s+Full Game Replay", title, re.IGNORECASE)
    if event_match:
        event = event_match.group(1).strip()
        if event and event.lower() not in ["full", "game", "replay"]:
            result["event"] = event

    return result


def _extract_games_from_page(soup: BeautifulSoup, filter_team: Optional[str] = None) -> list[GameResult]:
    """
    Pull game listings out of a parsed page.

    Args:
        soup: BeautifulSoup object of the page
        filter_team: Optional team name to filter for (team-specific pages
            may include unrelated sidebar games)

    Returns:
        List of GameResult objects found on the page
    """
    games: list[GameResult] = []
    seen_urls: set[str] = set()
    filter_lower = filter_team.lower() if filter_team else None

    for link in soup.find_all("a", href=True):
        text = link.get_text(strip=True)

        # Only anchors whose text looks like a replay title qualify
        if "Full Game Replay" not in text:
            continue

        # Must be a matchup ("vs" or "vs.")
        lowered = text.lower()
        if " vs " not in lowered and " vs. " not in lowered:
            continue

        # Normalize to an absolute URL
        href = link["href"]
        if not href.startswith("http"):
            href = f"https://nfl-video.com{href}"

        # Pages repeat links; keep each game URL only once
        if href in seen_urls:
            continue

        # Pull structured metadata out of the title text
        parsed = _parse_game_title(text)
        if not parsed["team_a"] or not parsed["team_b"]:
            continue

        # Drop games that don't involve the filter team, when one was given
        if filter_lower is not None:
            if filter_lower not in parsed["team_a"].lower() and filter_lower not in parsed["team_b"].lower():
                continue

        # Look for a thumbnail image adjacent to the link
        thumbnail_url = None
        parent = link.find_parent()
        if parent:
            img = parent.find("img")
            if img and img.get("src"):
                thumbnail_url = img["src"]
                if not thumbnail_url.startswith("http"):
                    thumbnail_url = f"https://nfl-video.com{thumbnail_url}"

        game = GameResult(
            title=text,
            team_a=parsed["team_a"],
            team_b=parsed["team_b"],
            url=href,
            thumbnail_url=thumbnail_url,
            year=parsed["year"],
            event=parsed["event"],
        )
        seen_urls.add(href)
        games.append(game)
        logger.debug("Found game: %s", game)

    return games


def _team_matches(game: GameResult, team_name: str) -> bool:
    """
    Check if a game involves a given team.

    Args:
        game: GameResult to check
        team_name: Team name to search for

    Returns:
        True if the team appears in either team_a or team_b
    """
    team_lower = team_name.lower()
    return team_lower in game.team_a.lower() or team_lower in game.team_b.lower()


def search_games(
    team_a: str,
    team_b: Optional[str] = None,
    max_pages: int = 5,
    delay_seconds: float = REQUEST_DELAY_SECONDS,
) -> SearchResults:
    """
    Search for college football games by team name(s).

    Uses team-specific pages for efficient searching:
    1. Looks up team_a's dedicated page in the team index
    2. Fetches all games from that team's page
    3. If team_b specified, filters to only games against that opponent

    Args:
        team_a: Primary team name to search for (required)
        team_b: Optional opponent name - if provided, only games against this team are returned
        max_pages: Maximum number of pages to search on the team's page (default 5)
        delay_seconds: Delay between page requests to avoid rate limiting

    Returns:
        SearchResults object containing matching games and search metadata.
        `pages_searched` reflects the number of pages actually fetched, which
        may be less than max_pages if a fetch fails.

    Example:
        # Find all Alabama games
        results = search_games("Alabama")

        # Find Alabama vs Georgia specifically
        results = search_games("Alabama", "Georgia")
    """
    logger.info("Searching for games: team_a='%s', team_b='%s'", team_a, team_b)

    # Find the team's dedicated page URL
    team_url = _find_team_url(team_a)

    if team_url is None:
        logger.warning("Could not find team page for '%s', returning empty results", team_a)
        return SearchResults(
            query_team_a=team_a,
            query_team_b=team_b,
            games=[],
            pages_searched=0,
            total_games_scanned=0,
        )

    logger.info("Found team page for '%s': %s", team_a, team_url)

    matching_games: list[GameResult] = []
    seen_urls: set[str] = set()  # O(1) cross-page dedupe by game URL
    total_scanned = 0
    # BUG FIX: previously reported pages_searched=max_pages even when the
    # loop stopped early on a failed fetch; count pages actually fetched.
    pages_searched = 0

    # Fetch pages from the team's dedicated page
    for page_num in range(1, max_pages + 1):
        # Team pages use same pagination pattern as main: base URL, then base-2, base-3, etc.
        page_url = team_url if page_num == 1 else f"{team_url}-{page_num}"

        logger.info("Searching page %d/%d: %s", page_num, max_pages, page_url)

        soup = _fetch_page(page_url)
        if soup is None:
            logger.warning("Failed to fetch page %d, stopping search", page_num)
            break

        pages_searched += 1

        # Extract games, filtering for the team (team pages sometimes include unrelated sidebar games)
        page_games = _extract_games_from_page(soup, filter_team=team_a)
        total_scanned += len(page_games)

        # Filter by opponent if specified
        for game in page_games:
            # If team_b specified, must also match team_b
            if team_b and not _team_matches(game, team_b):
                continue

            # Avoid duplicates (same URL)
            if game.url not in seen_urls:
                seen_urls.add(game.url)
                matching_games.append(game)
                logger.info("Found matching game: %s", game)

        # Be polite - add delay between requests
        if page_num < max_pages:
            time.sleep(delay_seconds)

    results = SearchResults(
        query_team_a=team_a,
        query_team_b=team_b,
        games=matching_games,
        pages_searched=pages_searched,
        total_games_scanned=total_scanned,
    )

    logger.info("Search complete: %s", results)
    return results


def list_available_teams() -> list[str]:
    """
    List every team name known to the index, sorted alphabetically.

    Useful for populating dropdown menus or autocomplete in the UI.

    Returns:
        Sorted list of title-cased team names
    """
    index = _build_team_index()
    # Index keys are lowercase; restore display casing word by word
    return sorted(key.title() for key in index)