repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1 value | license stringclasses 7 values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2 classes |
|---|---|---|---|---|---|---|---|---|
pkjmesra/PKScreener | https://github.com/pkjmesra/PKScreener/blob/c03a12626a557190678ff47897077bdf7784495c/pkscreener/classes/screening/__init__.py | pkscreener/classes/screening/__init__.py | """
The MIT License (MIT)
Copyright (c) 2023 pkjmesra
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from pkscreener.classes.screening.signals import TradingSignals, SignalStrength
__all__ = ['TradingSignals', 'SignalStrength']
| python | MIT | c03a12626a557190678ff47897077bdf7784495c | 2026-01-05T06:31:20.733224Z | false |
pkjmesra/PKScreener | https://github.com/pkjmesra/PKScreener/blob/c03a12626a557190678ff47897077bdf7784495c/pkscreener/classes/bot/__init__.py | pkscreener/classes/bot/__init__.py | """
Bot module for PKScreener Telegram Bot
Contains refactored bot handler classes for better maintainability.
"""
from pkscreener.classes.bot.BotHandlers import (
PKBotLocalCache,
BotConstants,
UserHandler,
MenuHandler,
SubscriptionHandler,
MarketTimeHandler,
TextSanitizer
)
__all__ = [
'PKBotLocalCache',
'BotConstants',
'UserHandler',
'MenuHandler',
'SubscriptionHandler',
'MarketTimeHandler',
'TextSanitizer'
]
| python | MIT | c03a12626a557190678ff47897077bdf7784495c | 2026-01-05T06:31:20.733224Z | false |
pkjmesra/PKScreener | https://github.com/pkjmesra/PKScreener/blob/c03a12626a557190678ff47897077bdf7784495c/pkscreener/classes/bot/BotHandlers.py | pkscreener/classes/bot/BotHandlers.py | #!/usr/bin/python3
"""
The MIT License (MIT)
Copyright (c) 2023 pkjmesra
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""
BotHandlers - Refactored bot handlers for PKScreener Telegram Bot
This module contains refactored handler classes for better maintainability:
- UserHandler: User registration and authentication
- MenuHandler: Menu navigation and rendering
- ScanHandler: Scan execution and result handling
- SubscriptionHandler: User subscription management
"""
import logging
import re
import threading
from time import sleep
from PKDevTools.classes.Singleton import SingletonType, SingletonMixin
from PKDevTools.classes.PKDateUtilities import PKDateUtilities
from PKDevTools.classes.MarketHours import MarketHours
logger = logging.getLogger(__name__)
class PKBotLocalCache(SingletonMixin, metaclass=SingletonType):
    """Process-wide singleton holding bot session data.

    Attributes:
        registered_ids: Telegram user ids already registered this run.
        user_states: per-user conversational state keyed by user id.
    """

    def __init__(self):
        super().__init__()
        # Known-registered user ids; list preserves first-seen order.
        self.registered_ids = []
        # Arbitrary per-user state, keyed by Telegram user id.
        self.user_states = {}
class BotConstants:
    """Shared constant values used across the bot handlers."""

    # Telegram hard-caps a single message at 4096 characters.
    MAX_MSG_LENGTH = 4096
    OWNER_USER = "Itsonlypk"
    APOLOGY_TEXT = ("Apologies! The @nse_pkscreener_bot is NOT available for the time being! "
                    "We are working with our host GitHub and other data source providers to sort out "
                    "pending invoices and restore the services soon! Thanks for your patience and support! 🙏")
    # Menu skip configurations
    TOP_LEVEL_SCANNER_MENUS = ["X", "B", "MI", "DV", "P"]
    TOP_LEVEL_SCANNER_SKIP_MENUS = ["M", "S", "F", "G", "C", "T", "D", "I", "E", "U", "L", "Z", "P"]
    INDEX_COMMANDS_SKIP_MENUS_SCANNER = ["W", "E", "M", "Z", "S"]
    INDEX_COMMANDS_SKIP_MENUS_BACKTEST = ["W", "E", "M", "Z", "S", "N", "0", "15"]
    # Scanner menu keys "1"-"3" plus "10"-"45" (excluding "22") are leaves
    # of the menu tree: they have no submenus.
    SCANNER_MENUS_WITH_NO_SUBMENUS = ["1", "2", "3"] + [str(n) for n in range(10, 46) if n != 22]
    SCANNER_MENUS_WITH_SUBMENU_SUPPORT = ["6", "7", "21", "22", "30", "32", "33", "40"]
    # Submenus that themselves have a further child level, keyed by parent menu.
    SCANNER_SUBMENUS_CHILDLEVEL_SUPPORT = {
        "6": ["7", "10"],
        "7": ["3", "6", "7", "9"],
    }
class UserHandler:
    """Handles user registration, authentication, and OTP retrieval."""

    def __init__(self, config_manager):
        """
        Initialize UserHandler.

        Args:
            config_manager: Configuration manager instance
        """
        self.config_manager = config_manager
        self.cache = PKBotLocalCache()

    def register_user(self, user, force_fetch=False):
        """
        Register a user and get OTP.

        Args:
            user: Telegram user object
            force_fetch: Force fetch from database

        Returns:
            tuple: (otp_value, subscription_model, subscription_validity, alert_user);
            defaults to (0, 0, None, None) when no lookup is performed or it fails.
        """
        otp_value, subs_model, subs_validity, alert_user = 0, 0, None, None
        # Only hit the database for unknown users, unless a refresh is forced.
        should_query = user is not None and (force_fetch or user.id not in self.cache.registered_ids)
        if should_query:
            try:
                from PKDevTools.classes.DBManager import DBManager
                otp_value, subs_model, subs_validity, alert_user = DBManager().getOTP(
                    user.id, user.username,
                    f"{user.first_name} {user.last_name}",
                    validityIntervalInSeconds=self.config_manager.otpInterval
                )
                # A non-zero OTP means the lookup succeeded; remember the user.
                if str(otp_value).strip() != '0' and user.id not in self.cache.registered_ids:
                    self.cache.registered_ids.append(user.id)
            except Exception as e:
                logger.error(f"Error registering user: {e}")
        return otp_value, subs_model, subs_validity, alert_user

    def load_registered_users(self):
        """Load all registered users from database."""
        try:
            from PKDevTools.classes.DBManager import DBManager
            records = DBManager().getUsers(fieldName="userid")
            self.cache.registered_ids.extend(record.userid for record in records)
        except Exception as e:
            logger.error(f"Error loading registered users: {e}")
class MenuHandler:
    """Handles menu navigation and rendering for the bot."""

    def __init__(self):
        """Create one menu renderer per navigation depth (m0 through m4)."""
        from pkscreener.classes.MenuOptions import menus
        for depth in range(5):
            setattr(self, f"m{depth}", menus())

    def get_menu_for_level(self, level, parent_menu=None, skip_menus=None):
        """
        Get menu items for a specific level.

        Args:
            level: Menu level (0-4)
            parent_menu: Parent menu for rendering
            skip_menus: List of menu keys to skip

        Returns:
            list: Menu items for the level, excluding skipped keys
        """
        skip = skip_menus if skip_menus is not None else []
        renderer = getattr(self, f'm{level}')
        renderer.renderForMenu(selectedMenu=parent_menu, skip=skip, asList=True)
        return [entry for entry in renderer.menuDict.values() if entry.menuKey not in skip]

    def create_inline_keyboard(self, menu_items, callback_prefix=""):
        """
        Create inline keyboard markup from menu items.

        Args:
            menu_items: List of menu items
            callback_prefix: Prefix for callback data

        Returns:
            InlineKeyboardMarkup: Telegram inline keyboard, two buttons per row
        """
        from telegram import InlineKeyboardButton, InlineKeyboardMarkup
        buttons = [
            InlineKeyboardButton(
                text=entry.menuText[:30],  # keep button labels short
                callback_data=f"{callback_prefix}{entry.menuKey}"
            )
            for entry in menu_items
        ]
        # Group the flat button list into rows of two (last row may hold one).
        keyboard = [buttons[i:i + 2] for i in range(0, len(buttons), 2)]
        return InlineKeyboardMarkup(keyboard)
class SubscriptionHandler:
    """Handles user subscription management via GitHub workflow dispatch."""

    def __init__(self):
        """Initialize SubscriptionHandler."""
        pass

    def update_subscription(self, user_id, sub_value, sub_type="add"):
        """
        Update user subscription by dispatching the subscription workflow.

        Args:
            user_id: Telegram user ID
            sub_value: Subscription value
            sub_type: Type of update ("add" or "remove")

        Returns:
            str: Error message on failure, or None on success
        """
        import json
        from pkscreener.classes.WorkflowManager import run_workflow
        from PKDevTools.classes.Environment import PKEnvironment
        workflow_name = "w18-workflow-sub-data.yml"
        branch = "main"
        error_message = ("Uh oh! We ran into a problem enabling your subscription.\n"
                         "Please reach out to @ItsOnlyPK to resolve.")
        updated_results = None
        try:
            # Build the dispatch payload with json.dumps so that special
            # characters (quotes, backslashes) in the values are escaped
            # correctly; the previous string concatenation could produce
            # invalid JSON for such inputs.
            workflow_post_data = json.dumps({
                "ref": branch,
                "inputs": {
                    "userid": f"{user_id}",
                    "subtype": f"{sub_type}",
                    "subvalue": f"{sub_value}",
                },
            })
            ghp_token = PKEnvironment().allSecrets["PKG"]
            resp = run_workflow(
                workflowType="O",
                repo="PKScreener",
                owner="pkjmesra",
                branch=branch,
                ghp_token=ghp_token,
                workflow_name=workflow_name,
                workflow_postData=workflow_post_data
            )
            # GitHub's workflow-dispatch endpoint answers 204 No Content on
            # success; anything else is reported back to the user.
            if resp is not None and resp.status_code != 204:
                updated_results = error_message
        except Exception as e:
            logger.error(f"Error updating subscription: {e}")
            updated_results = error_message
        return updated_results

    def match_utr(self, utr):
        """
        Match a UTR number to a received payment transaction.

        Args:
            utr: UTR number to match

        Returns:
            dict: Matched transaction or None
        """
        try:
            from PKDevTools.classes.GmailReader import PKGmailReader
            return PKGmailReader.matchUTR(utr=utr)
        except Exception as e:
            logger.error(f"Error matching UTR: {e}")
            return None
class MarketTimeHandler:
    """Handles market time-related operations (open/close checks, open timers)."""
    @staticmethod
    def is_in_market_hours():
        """
        Check if current time is within market hours.
        Returns:
            bool: True when today is a trading day and the current time lies
            between market open and close (both bounds inclusive).
        """
        now = PKDateUtilities.currentDateTime()
        # Today's open timestamp, built from the configured market hours.
        market_start_time = PKDateUtilities.currentDateTime(
            simulate=True,
            hour=MarketHours().openHour,
            minute=MarketHours().openMinute
        )
        # Today's close timestamp.
        market_close_time = PKDateUtilities.currentDateTime(
            simulate=True,
            hour=MarketHours().closeHour,
            minute=MarketHours().closeMinute
        )
        return (not PKDateUtilities.isTodayHoliday()[0] and
                now >= market_start_time and
                now <= market_close_time)
    @staticmethod
    def initialize_intraday_timer(callback_func):
        """
        Initialize timer for intraday monitoring.

        If called shortly before market open, schedules a one-shot timer that
        fires just after the open; if called during market hours, invokes the
        callback immediately.
        Args:
            callback_func: Function to call when timer fires
        Returns:
            threading.Timer: Started timer when scheduled for a future open;
            None otherwise (holiday, outside the scheduling window, or when
            the callback was invoked synchronously).
        """
        try:
            # Nothing to schedule on market holidays.
            if PKDateUtilities.isTodayHoliday()[0]:
                return None
            now = PKDateUtilities.currentDateTime()
            # One minute before the official open.
            # NOTE(review): assumes openMinute >= 1; openMinute == 0 would pass
            # minute=-1 here — confirm MarketHours/currentDateTime semantics.
            market_start_time = PKDateUtilities.currentDateTime(
                simulate=True,
                hour=MarketHours().openHour,
                minute=MarketHours().openMinute - 1
            )
            market_close_time = PKDateUtilities.currentDateTime(
                simulate=True,
                hour=MarketHours().closeHour,
                minute=MarketHours().closeMinute
            )
            # Earliest time at which scheduling is allowed (roughly 1.5 hours
            # before open). NOTE(review): openHour - 2 / openMinute + 30 can
            # leave the 0-23 / 0-59 ranges for unusual market hours — verify.
            market_open_prior = PKDateUtilities.currentDateTime(
                simulate=True,
                hour=MarketHours().openHour - 2,
                minute=MarketHours().openMinute + 30
            )
            if now < market_start_time and now >= market_open_prior:
                # Pre-open window: fire the callback just after the open.
                difference = (market_start_time - now).total_seconds() + 1
                timer = threading.Timer(difference, callback_func, args=[])
                timer.start()
                return timer
            elif now >= market_start_time and now <= market_close_time:
                # Market already open: run immediately, nothing to schedule.
                callback_func()
                return None
            # After close or before the scheduling window: fall through (None).
        except Exception as e:
            logger.error(f"Error initializing intraday timer: {e}")
            # Best-effort: still run the callback so monitoring is not skipped.
            callback_func()
            return None
class TextSanitizer:
    """Utility helpers for preparing outbound Telegram text."""

    @staticmethod
    def sanitize(text, max_length=4096):
        """
        Sanitize text for a Telegram message.

        Args:
            text: Text to sanitize
            max_length: Maximum message length

        Returns:
            str: Empty string for None input, otherwise the text truncated
            to at most max_length characters.
        """
        if text is None:
            return ""
        # Slicing is a no-op when the text already fits within the limit.
        return text[:max_length]

    @staticmethod
    def escape_html(text):
        """
        Escape HTML characters in text.

        Args:
            text: Text to escape

        Returns:
            str: Escaped text
        """
        import html
        return html.escape(str(text))
| python | MIT | c03a12626a557190678ff47897077bdf7784495c | 2026-01-05T06:31:20.733224Z | false |
pkjmesra/PKScreener | https://github.com/pkjmesra/PKScreener/blob/c03a12626a557190678ff47897077bdf7784495c/pkscreener/classes/cli/PKCliRunner.py | pkscreener/classes/cli/PKCliRunner.py | #!/usr/bin/python3
"""
The MIT License (MIT)
Copyright (c) 2023 pkjmesra
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""
PKCliRunner - Refactored CLI Runner for PKScreener
This module handles the CLI application execution, including:
- Intraday analysis report generation
- Progress status updates
- Monitor and piped scan handling
- Configuration duration management
"""
import os
import sys
import traceback
from time import sleep
from PKDevTools.classes.ColorText import colorText
from PKDevTools.classes.log import default_logger
from PKDevTools.classes.PKDateUtilities import PKDateUtilities
from PKDevTools.classes.OutputControls import OutputControls
from PKDevTools.classes import Archiver
import pkscreener.classes.ConfigManager as ConfigManager
class PKCliRunner:
    """
    Handles CLI execution flow including application running,
    piped scans, and monitor modes.
    """
    def __init__(self, config_manager, args):
        """
        Initialize the CLI runner.
        Args:
            config_manager: Configuration manager instance
            args: Parsed command line arguments (argparse.Namespace-like)
        """
        self.config_manager = config_manager
        self.args = args
        # Scan outputs and timing bookkeeping, filled in as runs progress.
        self.results = None
        self.result_stocks = None
        self.plain_results = None
        self.db_timestamp = None
        self.elapsed_time = 0
        self.start_time = None
    def update_progress_status(self, monitor_options=None):
        """
        Update progress status for display.

        Maps the raw option string to a predefined scan menu entry (when one
        matches) and updates args.progressstatus/usertag accordingly.
        Args:
            monitor_options: Optional monitor options string
        Returns:
            tuple: (args, choices) — choices is "" when no predefined entry matched
        """
        from pkscreener.classes.MenuOptions import PREDEFINED_SCAN_MENU_TEXTS, PREDEFINED_SCAN_MENU_VALUES
        choices = ""
        try:
            # Only system-launched runs (or explicit monitor options) map onto
            # a predefined scan; interactive runs keep an empty `choices`.
            if self.args.systemlaunched or monitor_options is not None:
                options_to_use = self.args.options if monitor_options is None else monitor_options
                # Reconstruct the canonical CLI invocation string used as the
                # lookup key into PREDEFINED_SCAN_MENU_VALUES.
                choices = f"--systemlaunched -a y -e -o '{options_to_use.replace('C:','X:').replace('D:','')}'"
                from pkscreener.classes.MenuOptions import INDICES_MAP
                search_choices = choices
                # Normalize any numeric index selection to "X:12:" so the lookup
                # matches the predefined entries — presumably keyed on the
                # default index 12; TODO confirm against MenuOptions.
                for index_key in INDICES_MAP.keys():
                    if index_key.isnumeric():
                        search_choices = search_choices.replace(f"X:{index_key}:", "X:12:")
                # Raises ValueError (caught below) when no predefined entry matches.
                index_num = PREDEFINED_SCAN_MENU_VALUES.index(search_choices)
                selected_index_option = choices.split(":")[1]
                # Piped scans (">|") get a compact "P_1_<n>_<index>" tag instead.
                choices = f"P_1_{str(index_num + 1)}_{str(selected_index_option)}" if ">|" in choices else choices
                self.args.progressstatus = f" [+] {choices} => Running {choices}"
                self.args.usertag = PREDEFINED_SCAN_MENU_TEXTS[index_num]
                self.args.maxdisplayresults = 2000
        except Exception as e:
            # Not a predefined scan (or lookup failed): fall back to empty choices.
            default_logger().debug(f"Error handling predefined scan: {e}")
            choices = ""
        return self.args, choices
    def check_intraday_component(self, monitor_option):
        """
        Check and handle intraday component in monitor option.

        Applies the "i <duration>" trailing component (if any) to the candle
        configuration and strips it from the option string.
        Args:
            monitor_option: Monitor option string
        Returns:
            str: Modified monitor option
        """
        last_component = monitor_option.split(":")[-1]
        # The duration marker may not be the final ":"-component; recover it
        # from a ":i" split when needed.
        if "i" not in last_component:
            possible_positions = monitor_option.split(":i")
            if len(possible_positions) > 1:
                last_component = f"i {possible_positions[1]}"
        # NOTE(review): a bare "i" anywhere in the component triggers intraday
        # mode — verify the option grammar guarantees this is unambiguous.
        if "i" in last_component:
            # Switch to intraday scan
            monitor_option = monitor_option.replace(last_component, "")
            self.args.intraday = last_component.replace("i", "").strip()
            self.config_manager.toggleConfig(candleDuration=self.args.intraday, clearCache=False)
        else:
            # Switch to daily scan
            self.args.intraday = None
            self.config_manager.toggleConfig(candleDuration='1d', clearCache=False)
        return monitor_option
    def update_config_durations(self):
        """Update configuration durations based on args options.

        Inspects the first segment of a piped option chain ("A>B>...") for an
        intraday duration marker and toggles the candle configuration.
        """
        if self.args is None or self.args.options is None:
            return
        next_ones = self.args.options.split(">")
        # Only applies when the options describe a piped chain.
        if len(next_ones) > 1:
            monitor_option = next_ones[0]
            if len(monitor_option) == 0:
                return
            last_component = ":".join(monitor_option.split(":")[-2:])
            # An intraday duration looks like "i 5m" in the last two
            # components: contains "i" and a space, but no comma-separated
            # stock list. (Same heuristic as pipe_results below.)
            if "i" in last_component and "," not in last_component and " " in last_component:
                if "i" in last_component.split(":")[-2]:
                    last_component = last_component.split(":")[-2]
                else:
                    last_component = last_component.split(":")[-1]
                # Switch to intraday scan
                self.args.intraday = last_component.replace("i", "").strip()
                self.config_manager.toggleConfig(candleDuration=self.args.intraday, clearCache=False)
            else:
                # Switch to daily scan
                self.args.intraday = None
                self.config_manager.toggleConfig(candleDuration='1d', clearCache=False)
    def pipe_results(self, prev_output):
        """
        Pipe results from previous scan to next scan.

        Rewrites self.args.options so that the next segment of the piped chain
        runs against the stocks found by the previous scan.
        Args:
            prev_output: Previous scan output dataframe
        Returns:
            bool: Whether to continue with piped scan (True only when the
            previous scan actually found stocks)
        """
        if self.args is None or self.args.options is None:
            return False
        has_found_stocks = False
        next_ones = self.args.options.split(">")
        if len(next_ones) > 1:
            monitor_option = next_ones[1]
            if len(monitor_option) == 0:
                return False
            last_component = ":".join(monitor_option.split(":")[-2:])
            # Same intraday-duration heuristic as update_config_durations.
            if "i" in last_component and "," not in last_component and " " in last_component:
                if "i" in last_component.split(":")[-2]:
                    last_component = last_component.split(":")[-2]
                else:
                    last_component = last_component.split(":")[-1]
                # Switch to intraday scan
                monitor_option = monitor_option.replace(last_component, "")
                self.args.intraday = last_component.replace("i", "").strip()
                self.config_manager.toggleConfig(candleDuration=self.args.intraday, clearCache=False)
            else:
                # Switch to daily scan
                self.args.intraday = None
                self.config_manager.toggleConfig(candleDuration='1d', clearCache=False)
            # Piped segments start with "|"; strip the marker before parsing.
            if monitor_option.startswith("|"):
                monitor_option = monitor_option.replace("|", "")
            monitor_options = monitor_option.split(":")
            # Piped X/C scans must target index "0" (the piped stock list).
            if monitor_options[0].upper() in ["X", "C"] and monitor_options[1] != "0":
                monitor_options[1] = "0"
                monitor_option = ":".join(monitor_options)
            # Backtest segments get the "30" sub-option injected after the mode key.
            if "B" in monitor_options[0].upper() and monitor_options[1] != "30":
                monitor_option = ":".join(monitor_options).upper().replace(
                    f"{monitor_options[0].upper()}:{monitor_options[1]}",
                    f"{monitor_options[0].upper()}:30:{monitor_options[1]}"
                )
            # Pipe output from previous run
            if prev_output is not None and not prev_output.empty:
                try:
                    prev_output.set_index("Stock", inplace=True)
                except Exception:
                    pass  # Index may already be set or column may not exist
                prev_output_results = prev_output[~prev_output.index.duplicated(keep='first')]
                prev_output_results = prev_output_results.index
                has_found_stocks = len(prev_output_results) > 0
                prev_output_results = ",".join(prev_output_results)
                monitor_option = monitor_option.replace(":D:", ":")
                # Append the found stocks as the trailing component of the option.
                monitor_option = f"{monitor_option}:{prev_output_results}"
            self.args.options = monitor_option.replace("::", ":")
            # Re-attach the remaining (not yet executed) segments of the chain.
            self.args.options = self.args.options + ":D:>" + ":D:>".join(next_ones[2:])
            self.args.options = self.args.options.replace("::", ":")
            return True and has_found_stocks
        return False
    def update_config(self):
        """Update configuration based on args.

        Ensures period/duration are sane for the requested mode: intraday
        scans use period "1d" with the requested candle duration; otherwise
        a default of 1y/1d is restored for unattended runs.
        """
        if self.args is None:
            return
        self.config_manager.getConfig(ConfigManager.parser)
        if self.args.intraday:
            self.config_manager.toggleConfig(candleDuration=self.args.intraday, clearCache=False)
            # Reset to an intraday-compatible period/duration when the saved
            # config frequencies don't fit intraday scanning.
            if (self.config_manager.candlePeriodFrequency not in ["d", "mo"] or
                self.config_manager.candleDurationFrequency not in ["m"]):
                self.config_manager.period = "1d"
                self.config_manager.duration = self.args.intraday
                self.config_manager.setConfig(ConfigManager.parser, default=True, showFileCreatedText=False)
        elif (self.config_manager.candlePeriodFrequency not in ["y", "max", "mo"] or
              self.config_manager.candleDurationFrequency not in ["d", "wk", "mo", "h"]):
            # Only auto-reset for unattended runs (a default answer was supplied).
            if self.args.answerdefault is not None or self.args.systemlaunched:
                self.config_manager.period = "1y"
                self.config_manager.duration = "1d"
                self.config_manager.setConfig(ConfigManager.parser, default=True, showFileCreatedText=False)
class IntradayAnalysisRunner:
    """Handles intraday analysis report generation."""
    def __init__(self, config_manager, args):
        """
        Initialize the intraday analysis runner.
        Args:
            config_manager: Configuration manager instance
            args: Parsed command line arguments
        """
        self.config_manager = config_manager
        self.args = args
    def generate_reports(self):
        """Generate intraday analysis reports.

        Runs each analysis option (either the single option from args or the
        predefined piped analysis set), accumulating results into one outcome
        dataframe which is finally summarized and sent out.
        """
        from pkscreener.globals import (
            main, isInterrupted, closeWorkersAndExit,
            resetUserMenuChoiceOptions, showBacktestResults
        )
        from pkscreener.classes.MenuOptions import (
            menus, PREDEFINED_SCAN_MENU_TEXTS,
            PREDEFINED_PIPED_MENU_ANALYSIS_OPTIONS, PREDEFINED_SCAN_MENU_VALUES
        )
        import pandas as pd
        from pkscreener.classes import Utility
        # Save and set max display results
        max_display_results = self.config_manager.maxdisplayresults
        self.config_manager.maxdisplayresults = 2000
        self.config_manager.setConfig(ConfigManager.parser, default=True, showFileCreatedText=False)
        run_options = []
        other_menus = []
        # A fully-specified option string (>= 4 ":"-components) runs alone;
        # otherwise the whole predefined analysis suite is executed.
        if len(self.args.options.split(":")) >= 4:
            run_options = [self.args.options]
        else:
            run_options = PREDEFINED_PIPED_MENU_ANALYSIS_OPTIONS
        if len(other_menus) > 0:
            run_options.extend(other_menus)
        optional_final_outcome_df = pd.DataFrame()
        cli_runner = PKCliRunner(self.config_manager, self.args)
        # Delete existing data from previous run
        self.config_manager.deleteFileWithPattern(
            rootDir=Archiver.get_user_data_dir(),
            pattern="stock_data_*.pkl"
        )
        analysis_index = 1
        for run_option in run_options:
            # Derive a human-readable tag for the progress line; falls back to
            # a sanitized version of the raw option string when the option is
            # not one of the predefined menu values.
            try:
                run_option_name = f"--systemlaunched -a y -e -o '{run_option.replace('C:','X:').replace('D:','')}'"
                index_num = PREDEFINED_SCAN_MENU_VALUES.index(run_option_name)
                run_option_name = f"{' [+] P_1_'+str(index_num + 1) if '>|' in run_option else run_option}"
            except Exception as e:
                default_logger().debug(e, exc_info=True)
                run_option_name = f" [+] {run_option.replace('D:','').replace(':D','').replace(':','_').replace('_D','').replace('C_','X_')}"
            self.args.progressstatus = f"{run_option_name} => Running Intraday Analysis: {analysis_index} of {len(run_options)}..."
            # Update analysis options: the final piped segment runs in
            # candlestick ("C:") analysis mode instead of scan ("X:") mode.
            analysis_options = run_option.split("|")
            analysis_options[-1] = analysis_options[-1].replace("X:", "C:")
            run_option = "|".join(analysis_options)
            self.args.options = run_option
            try:
                results, plain_results = main(userArgs=self.args, optionalFinalOutcome_df=optional_final_outcome_df)
                # Drain any piped menus the run scheduled before continuing.
                if self.args.pipedmenus is not None:
                    while self.args.pipedmenus is not None:
                        results, plain_results = main(userArgs=self.args)
                if isInterrupted():
                    closeWorkersAndExit()
                    return
                # Keep chaining while the previous scan found stocks to pipe.
                run_piped_scans = True
                while run_piped_scans:
                    run_piped_scans = cli_runner.pipe_results(plain_results)
                    if run_piped_scans:
                        self.args, _ = cli_runner.update_progress_status()
                        results, plain_results = main(
                            userArgs=self.args,
                            optionalFinalOutcome_df=optional_final_outcome_df
                        )
                # Adopt the run's results as the accumulated outcome only when
                # they are at least as complete as what we already have.
                if (results is not None and
                    len(results) >= len(optional_final_outcome_df) and
                    not results.empty and
                    len(results.columns) > 5):
                    import numpy as np
                    if "%Chng" in results.columns and "EoDDiff" in results.columns:
                        optional_final_outcome_df = results
                if (optional_final_outcome_df is not None and
                    "EoDDiff" not in optional_final_outcome_df.columns):
                    # File corrupted, re-download
                    self.config_manager.deleteFileWithPattern(
                        rootDir=Archiver.get_user_data_dir(),
                        pattern="*stock_data_*.pkl"
                    )
                    self.config_manager.deleteFileWithPattern(
                        rootDir=Archiver.get_user_data_dir(),
                        pattern="*intraday_stock_data_*.pkl"
                    )
                if isInterrupted():
                    break
            except KeyboardInterrupt:
                closeWorkersAndExit()
                return
            except Exception as e:
                # Best-effort: report and continue with the next run option.
                OutputControls().printOutput(e)
                if self.args.log:
                    traceback.print_exc()
            resetUserMenuChoiceOptions()
            analysis_index += 1
        # Restore settings
        self.config_manager.maxdisplayresults = max_display_results
        self.config_manager.setConfig(ConfigManager.parser, default=True, showFileCreatedText=False)
        # Save and send final outcome
        self._save_send_final_outcome(optional_final_outcome_df)
    def _save_send_final_outcome(self, optional_final_outcome_df):
        """
        Save and send final outcome dataframe.

        Extracts the "BASKET" rows, tabulates them, shows the backtest summary
        and posts it to the configured Telegram channel (if any).
        Args:
            optional_final_outcome_df: Final outcome dataframe
        """
        import pandas as pd
        from pkscreener.classes import Utility
        from pkscreener.globals import sendQuickScanResult, showBacktestResults
        if optional_final_outcome_df is None or optional_final_outcome_df.empty:
            return
        final_df = None
        try:
            optional_final_outcome_df.drop('FairValue', axis=1, inplace=True, errors="ignore")
            df_grouped = optional_final_outcome_df.groupby("Stock")
            # Only the synthetic "BASKET" aggregation rows go into the summary.
            for stock, df_group in df_grouped:
                if stock == "BASKET":
                    cols = ["Pattern", "LTP", "LTP@Alert", "SqrOffLTP", "SqrOffDiff", "EoDDiff", "DayHigh", "DayHighDiff"]
                    if final_df is None:
                        final_df = df_group[cols]
                    else:
                        final_df = pd.concat([final_df, df_group[cols]], axis=0)
        except Exception as e:
            default_logger().debug(f"Error processing intraday analysis: {e}")
        if final_df is None or final_df.empty:
            return
        # Silence chained-assignment warnings for the in-place reshaping below.
        with pd.option_context('mode.chained_assignment', None):
            final_df = final_df[["Pattern", "LTP@Alert", "LTP", "EoDDiff", "DayHigh", "DayHighDiff"]]
            final_df.rename(
                columns={
                    "Pattern": "Scan Name",
                    "LTP@Alert": "Basket Value@Alert",
                    "LTP": "Basket Value@EOD",
                    "DayHigh": "Basket Value@DayHigh",
                },
                inplace=True,
            )
            final_df.dropna(inplace=True)
            final_df.dropna(how="all", axis=1, inplace=True)
            mark_down = colorText.miniTabulator().tabulate(
                final_df,
                headers="keys",
                tablefmt=colorText.No_Pad_GridFormat,
                showindex=False
            ).encode("utf-8").decode(Utility.STD_ENCODING)
            showBacktestResults(final_df, optionalName="Intraday_Backtest_Result_Summary", choices="Summary")
            OutputControls().printOutput(mark_down)
            from PKDevTools.classes.Environment import PKEnvironment
            channel_id, _, _, _ = PKEnvironment().secrets
            # Only post to Telegram when a channel id is configured.
            if channel_id is not None and len(str(channel_id)) > 0:
                sendQuickScanResult(
                    menuChoiceHierarchy="IntradayAnalysis (If you would have bought at alert time and sold at end of day or day high)",
                    user=int(f"-{channel_id}"),
                    tabulated_results=mark_down,
                    markdown_results=mark_down,
                    caption="Intraday Analysis Summary - Morning alert vs Market Close",
                    pngName=f"PKS_IA_{PKDateUtilities.currentDateTime().strftime('%Y-%m-%d_%H:%M:%S')}",
                    pngExtension=".png",
                    forceSend=True
                )
class CliConfigManager:
    """Manages CLI-specific configuration and initialization."""
    def __init__(self, config_manager, args):
        """
        Initialize CLI config manager.
        Args:
            config_manager: Configuration manager instance
            args: Parsed command line arguments
        """
        self.config_manager = config_manager
        self.args = args
    @staticmethod
    def remove_old_instances():
        """Remove old CLI instances.

        Deletes stale "pkscreenercli*" files from the working directory,
        keeping the currently-running executable.
        """
        import glob
        pattern = "pkscreenercli*"
        this_instance = sys.argv[0]
        # NOTE(review): glob's root_dir parameter requires Python 3.10+ —
        # confirm the minimum supported interpreter version.
        for f in glob.glob(pattern, root_dir=os.getcwd(), recursive=True):
            # Resolve matches to absolute-ish paths unless they already look
            # like the running instance's path. NOTE(review): the 10-char
            # prefix comparison presumably matches versioned binary names —
            # verify against the release naming scheme.
            file_to_delete = (f if (os.sep in f and f.startswith(this_instance[:10]))
                              else os.path.join(os.getcwd(), f))
            # Never delete the binary that is currently running.
            if not file_to_delete.endswith(this_instance):
                try:
                    os.remove(file_to_delete)
                except OSError:
                    pass  # File may be in use or already deleted
    def validate_tos_acceptance(self):
        """
        Validate Terms of Service acceptance.

        Acceptance is implied either by any explicit CLI argument being passed
        or by an interactive "Y" answer; "-a N" is treated as explicit refusal.
        Returns:
            bool: True if TOS accepted, False otherwise
        """
        user_acceptance = self.config_manager.tosAccepted
        if not self.config_manager.tosAccepted:
            # Explicit "-a N" counts as refusing the disclaimer/TOS outright.
            if (self.args is not None and
                self.args.answerdefault is not None and
                str(self.args.answerdefault).lower() == "n"):
                OutputControls().printOutput(
                    f"{colorText.FAIL}You seem to have passed disagreement to the Disclaimer and Terms Of Service of PKScreener by passing in {colorText.END}"
                    f"{colorText.WARN}--answerdefault N or -a N{colorText.END}. Exiting now!"
                )
                sleep(5)
                return False
            all_args = self.args.__dict__
            # OSC-8 hyperlink escape sequences render clickable links in
            # terminals that support them.
            disclaimer_link = '\x1b[97m\x1b]8;;https://pkjmesra.github.io/PKScreener/Disclaimer.txt\x1b\\https://pkjmesra.github.io/PKScreener/Disclaimer.txt\x1b]8;;\x1b\\\x1b[0m'
            tos_link = '\x1b[97m\x1b]8;;https://pkjmesra.github.io/PKScreener/tos.txt\x1b\\https://pkjmesra.github.io/PKScreener/tos.txt\x1b]8;;\x1b\\\x1b[0m'
            # Any truthy CLI argument implies the user read and accepted the TOS.
            for arg_key in all_args.keys():
                arg = all_args[arg_key]
                if arg is not None and arg:
                    user_acceptance = True
                    OutputControls().printOutput(
                        f"{colorText.GREEN}By using this Software and passing a value for [{arg_key}={arg}], you agree to\n"
                        f"[+] having read through the Disclaimer{colorText.END} ({disclaimer_link})\n"
                        f"[+]{colorText.GREEN} and accept Terms Of Service {colorText.END}({tos_link}){colorText.GREEN} of PKScreener. {colorText.END}\n"
                        f"[+] {colorText.WARN}If that is not the case, you MUST immediately terminate PKScreener by pressing Ctrl+C now!{colorText.END}"
                    )
                    sleep(2)
                    break
            # No implicit acceptance: ask interactively (unless "-a y" was given).
            if (not user_acceptance and
                ((self.args is not None and self.args.answerdefault is not None and str(self.args.answerdefault).lower() != "y") or
                 (self.args is not None and self.args.answerdefault is None))):
                disclaimer_link = '\x1b[97m\x1b]8;;https://pkjmesra.github.io/PKScreener/Disclaimer.txt\x1b\\https://pkjmesra.github.io/PKScreener/Disclaimer.txt\x1b]8;;\x1b\\\x1b[0m'
                tos_link = '\x1b[97m\x1b]8;;https://pkjmesra.github.io/PKScreener/tos.txt\x1b\\https://pkjmesra.github.io/PKScreener/tos.txt\x1b]8;;\x1b\\\x1b[0m'
                user_acceptance = OutputControls().takeUserInput(
                    f"{colorText.WARN}By using this Software, you agree to\n"
                    f"[+] having read through the Disclaimer {colorText.END}({disclaimer_link}){colorText.WARN}\n"
                    f"[+] and accept Terms Of Service {colorText.END}({tos_link}){colorText.WARN} of PKScreener ? {colorText.END}"
                    f"(Y/N){colorText.GREEN} [Default: {colorText.END}{colorText.FAIL}N{colorText.END}{colorText.GREEN}] :{colorText.END}",
                    defaultInput="N",
                    enableUserInput=True
                ) or "N"
                # Anything but an explicit "y"/"Y" answer is treated as refusal.
                if str(user_acceptance).lower() != "y":
                    OutputControls().printOutput(
                        f"\n{colorText.WARN}You seem to have\n"
                        f" [+] passed disagreement to the Disclaimer and \n"
                        f" [+] not accepted Terms Of Service of PKScreener.\n{colorText.END}"
                        f"{colorText.FAIL}[+] You MUST read and agree to the disclaimer and MUST accept the Terms of Service to use PKScreener.{colorText.END}\n\n"
                        f"{colorText.WARN}Exiting now!{colorText.END}"
                    )
                    sleep(5)
                    return False
        return True
| python | MIT | c03a12626a557190678ff47897077bdf7784495c | 2026-01-05T06:31:20.733224Z | false |
pkjmesra/PKScreener | https://github.com/pkjmesra/PKScreener/blob/c03a12626a557190678ff47897077bdf7784495c/pkscreener/classes/cli/__init__.py | pkscreener/classes/cli/__init__.py | """
CLI module for PKScreener
Contains refactored CLI components for better maintainability.
"""
from pkscreener.classes.cli.PKCliRunner import PKCliRunner, IntradayAnalysisRunner, CliConfigManager
__all__ = ['PKCliRunner', 'IntradayAnalysisRunner', 'CliConfigManager']
| python | MIT | c03a12626a557190678ff47897077bdf7784495c | 2026-01-05T06:31:20.733224Z | false |
COVID19Tracking/covid-tracking-data | https://github.com/COVID19Tracking/covid-tracking-data/blob/a701975875f0dc4b98482854e9e02eb419b1fb14/data-collection-scripts/ltc-scrapers/python-scrapers/ks_ltc_scraper.py | data-collection-scripts/ltc-scrapers/python-scrapers/ks_ltc_scraper.py | from tableauscraper import TableauScraper as TS
from itertools import repeat
from datetime import datetime
from pytz import timezone
"""Fetch Kansas's Tableau LTC dashboard and output the data table data to STDOUT"""
url = "https://public.tableau.com/views/COVID-19TableauVersion2/ClusterSummary"
# tableauscraper drives the public Tableau workbook and exposes each
# worksheet's underlying data table as a pandas DataFrame.
ts = TS()
ts.loads(url)
dashboard = ts.getDashboard()
t = dashboard.getWorksheet("Active Outbreak Locations")
df = t.data
# filter only for long-term care facilities and extract the data we need
df = df[df['Type-value'].eq("Long Term Care Facility")]
# reindex() both selects the columns we need and creates the empty
# 'date_collected'/'state'/'blank' columns that are filled in below.
out_data = df.reindex(columns=["date_collected",
                               "state",
                               "County-alias",
                               "City or State-alias",
                               "Facility-value",
                               "Type-value",
                               "Last Onset Date-value",
                               "SUM(Number of Cases within 14 days)-alias",
                               "blank"])
out_data['date_collected'] = datetime.now(timezone('US/Eastern')).strftime('%Y%m%d')
out_data['state'] = 'KS'
# output the dataframe in CSV format to match the facility data entry sheet
# (the repeated "blank" entries pad the CSV so each value lands in the
# spreadsheet column the data-entry sheet expects)
out_columns = ["date_collected",
               "state",
               "County-alias",
               "City or State-alias",
               "Facility-value",
               "Type-value"]
out_columns.extend(repeat("blank", 4))
out_columns.append("Last Onset Date-value")
out_columns.extend(repeat("blank", 23))
out_columns.append("SUM(Number of Cases within 14 days)-alias")
print(out_data.to_csv(index=False, columns=out_columns))
| python | Apache-2.0 | a701975875f0dc4b98482854e9e02eb419b1fb14 | 2026-01-05T06:31:17.718705Z | false |
COVID19Tracking/covid-tracking-data | https://github.com/COVID19Tracking/covid-tracking-data/blob/a701975875f0dc4b98482854e9e02eb419b1fb14/data-collection-scripts/ltc-scrapers/python-scrapers/ut_ltc_scraper.py | data-collection-scripts/ltc-scrapers/python-scrapers/ut_ltc_scraper.py | import json
from datetime import datetime
from pytz import timezone
from urllib.request import urlopen
import sys
from itertools import repeat
import csv
from io import StringIO
"""Fetch Utah's LTC dashboard data and output the data table to STDOUT"""
def load_json():
    """Fetch Utah's LTC facility impacts from the ArcGIS feature service.

    Returns:
        dict: the decoded JSON payload.

    Exits the process with status 1 if the request does not return HTTP 200.
    """
    url = (
        "https://services6.arcgis.com/KaHXE9OkiB9e63uE/ArcGIS/rest/services/COVID19_Long_Term_Care_Facility_Impacts/FeatureServer/273/query?where=1%3D1&objectIds=&time=&geometry=&geometryType=esriGeometryEnvelope&inSR=&spatialRel=esriSpatialRelIntersects&resultType=none&distance=0.0&units=esriSRUnit_Meter&returnGeodetic=false&outFields=*&returnGeometry=true&featureEncoding=esriDefault&multipatchOption=xyFootprint&maxAllowableOffset=&geometryPrecision=&outSR=&datumTransformation=&applyVCSProjection=false&returnIdsOnly=false&returnUniqueIdsOnly=false&returnCountOnly=false&returnExtentOnly=false&returnQueryGeometry=false&returnDistinctValues=false&cacheHint=true&orderByFields=&groupByFieldsForStatistics=&outStatistics=&having=&resultOffset=&resultRecordCount=&returnZ=false&returnM=false&returnExceededLimitFeatures=true&quantizationParameters=&sqlFormat=none&f=pjson&token=")
    # Context manager releases the HTTP connection on both paths.
    with urlopen(url) as response:
        status_code = response.getcode()
        if status_code == 200:
            return json.loads(response.read())
    # Bug fix: status_code is an int, and the original concatenated it to a
    # str, raising TypeError and masking the real HTTP failure.
    print("URL request failed with status code: %d" % status_code)
    sys.exit(1)
dict_data = load_json()
if dict_data is None:
    print("Facility data was empty")
    sys.exit(1)
# build up a list of the output columns, matching the format of the LTC facility data sheet
# (each repeated "blank" fieldname emits the facility's empty "blank" value,
# padding the CSV so the real values land in the expected sheet columns)
fieldnames = ["Date Collected", "State"]
fieldnames.extend(repeat("blank", 2))
fieldnames.extend(["Facility_Name", "Facility_Type"])
fieldnames.extend(repeat("blank", 6))
fieldnames.append("Resolved_Y_N")
fieldnames.append("blank")
fieldnames.append("Postive_Patients_Desc") # intentional—it's spelled wrong in the source JSON
fieldnames.extend(repeat("blank", 7))
fieldnames.append("Unresolved_Postive_Patients_Desc")
fieldnames.extend(repeat("blank", 7))
fieldnames.append("Dashboard_Display")
# output CSV data; extrasaction='ignore' silently drops every source
# attribute that is not in fieldnames
si = StringIO()
writer = csv.DictWriter(si, fieldnames, extrasaction='ignore')
writer.writeheader()
for facility in dict_data["features"]:
    facility = facility["attributes"]
    facility["Date Collected"] = datetime.now(timezone('US/Eastern')).strftime('%Y%m%d')
    facility["State"] = "UT"
    # Only unresolved outbreaks carry their case description forward.
    facility["Unresolved_Postive_Patients_Desc"] = facility["Postive_Patients_Desc"] if facility["Resolved_Y_N"] == "N" else ""
    facility["blank"] = ""
    writer.writerow(facility)
print(si.getvalue())
| python | Apache-2.0 | a701975875f0dc4b98482854e9e02eb419b1fb14 | 2026-01-05T06:31:17.718705Z | false |
COVID19Tracking/covid-tracking-data | https://github.com/COVID19Tracking/covid-tracking-data/blob/a701975875f0dc4b98482854e9e02eb419b1fb14/data-collection-scripts/cdc-vaccinations-timeseries/make_timeseries.py | data-collection-scripts/cdc-vaccinations-timeseries/make_timeseries.py | import json
import git
import csv
import sys
import click
from collections import defaultdict
# we want our columns to come out in the desired order, and older pythons don't guarantee dict ordering
# Dict insertion order is guaranteed from CPython 3.7; the CSV column
# ordering below relies on it, so bail out early on older interpreters.
MIN_PYTHON = (3, 7)
if sys.version_info < MIN_PYTHON:
    sys.exit("Python %s.%s or later is required.\n" % MIN_PYTHON)
@click.command()
@click.argument('filename')
@click.argument('json_key')
@click.option('--strip-duplicate-days/--no-strip-duplicate-days', default=False,
              help='Process the data to only output one set of data for each day')
def main(filename, json_key, strip_duplicate_days):
    """Rebuild a timeseries CSV from the git history of a JSON data file.

    Walks every commit that touched FILENAME, parses each revision as JSON,
    and emits one CSV row per state entry found under JSON_KEY, written to
    stdout.
    """
    repo = git.Repo("../../")
    # fetch all the git commits that updated the data file
    # thanks to https://stackoverflow.com/q/28803626
    revlist = (
        (commit, (commit.tree / filename).data_stream.read())
        for commit in repo.iter_commits(paths=filename)
    )
    # build up a dict of the data history by looking at each commit in the git history
    data = {}
    for commit, filecontents in revlist:
        try:
            data[commit.committed_datetime] = json.loads(filecontents)
        except ValueError as e: # ignore invalid files: a corrupt download can get into the git history
            pass
    # go through the data and reformat it into one line per batch/state
    # if --strip-duplicate-days is set, we only keep the latest set of data for any state/date pair
    out_data = []
    # out_cols is used as an insertion-ordered set of column names (values
    # are never read) — hence the Python >= 3.7 requirement above.
    out_cols = {"runid": None}
    seen_data = defaultdict(set) # dict with date keys holding the states we've seen for that date
    for data_time, data_batch in data.items():
        for state_data in data_batch[json_key]:
            state_data["runid"] = data_batch["runid"]
            # there's an anomaly in the data where one day has the date in the wrong format.
            # reformat it, hoping this was a one-time glitch
            if state_data["Date"] == "01/12/2021":
                state_data["Date"] = "2021-01-12"
            # have we seen this state/date before?
            if strip_duplicate_days and state_data["ShortName"] in seen_data[state_data["Date"]]:
                continue
            # keep track of all the columns that exist as we go (we'll need that list for the CSV output)
            for k in state_data.keys():
                out_cols[k] = None
            # and keep track of the state/date pairs we've seen
            seen_data[state_data["Date"]].add(state_data["ShortName"])
            # and append the state's data to our output
            out_data.append(state_data)
    # output the data in CSV format
    writer = csv.DictWriter(sys.stdout, out_cols.keys())
    writer.writeheader()
    writer.writerows(out_data)
if __name__ == "__main__":
    main()
| python | Apache-2.0 | a701975875f0dc4b98482854e9e02eb419b1fb14 | 2026-01-05T06:31:17.718705Z | false |
COVID19Tracking/covid-tracking-data | https://github.com/COVID19Tracking/covid-tracking-data/blob/a701975875f0dc4b98482854e9e02eb419b1fb14/data-collection-scripts/long-calculations/longcalcs.py | data-collection-scripts/long-calculations/longcalcs.py | import pandas as pd
import requests
from datetime import datetime, timezone, timedelta
from pytz import timezone as tz # replace with ZoneInfo once G upgrades to 3.9
from io import StringIO, BytesIO
from bs4 import BeautifulSoup
import re
import zipfile
# DE positives
def de(f):
    """Write Delaware testing statistics to the open log file *f*."""
    print("Run at: ", datetime.now(tz('US/Eastern')), "\n", file=f)
    raw = pd.read_csv('https://myhealthycommunity.dhss.delaware.gov/locations/state/download_covid_19_data')
    tests = raw[raw['Unit'] == 'tests'].set_index(['Year', 'Month', 'Day']).sort_index()
    # Report the third-most-recent dated entry — presumably the newest days
    # are still being filled in upstream (TODO confirm).
    report_day = tests.index.unique()[-3]
    print(tests.loc[report_day][['Statistic', 'Value']], file=f)
    print("\n\n", file=f)
# HI PCR Test Encounters and update time
def hi(f):
    """Write Hawaii PCR test-encounter totals and the dashboard's last-update
    time to the open log file *f*."""
    # HI PCR Test Encounters
    print("Run at: ", datetime.now(tz('US/Eastern')), "\n", file=f)
    # Sum every numeric column of the Tableau epi-curve CSV export.
    hi = pd.read_csv("https://public.tableau.com/views/EpiCurveApr4/CSVDownload.csv?:showVizHome=no")
    print(hi.select_dtypes(exclude=['object']).sum(), file=f)
    # HI updated time
    res = requests.get("https://services9.arcgis.com/aKxrz4vDVjfUwBWJ/arcgis/rest/services/HIEMA_TEST_DATA_PUBLIC_LATEST/FeatureServer/0/query?where=name%3D'State'&returnGeometry=false&outFields=*&orderByFields=reportdt desc&resultOffset=0&resultRecordCount=1&f=json")
    updated = datetime.fromtimestamp(res.json()['features'][0]['attributes']['reportdt']/1000) # because ms
    # NOTE(review): fromtimestamp() yields *local* wall time, and
    # replace(tzinfo=utc) then labels it as UTC — correct only if this host
    # runs in UTC. Confirm the deployment timezone.
    # format we want: 12/27/2020 8:30:00
    print("\nUpdate time: ", updated.replace(tzinfo=timezone.utc).astimezone(tz=tz("Pacific/Honolulu")).strftime("%m/%d/%Y %H:%M:%S"), file=f)
    print("\n\n", file=f)
# MA Cases, Tests, and Hospitalizations
def ma(f):
    """Write Massachusetts cases, tests, and hospitalizations to the open
    log file *f*.

    Both the daily raw-data workbook and the weekly public-health report are
    linked from the same index page, so it is fetched and parsed once (the
    original downloaded and re-parsed the identical page twice).
    """
    print("Run at: ", datetime.now(tz('US/Eastern')), "\n", file=f)
    url = 'https://www.mass.gov/info-details/covid-19-response-reporting'
    req = requests.get(url)
    soup = BeautifulSoup(req.text, 'html.parser')
    # daily raw-data workbook
    a = soup.find('a', string=re.compile("COVID-19 Raw Data"))
    link = "https://www.mass.gov{}".format(a['href'])
    print("Download link = ", link, file=f)
    res = requests.get(link)
    # sheet_name=None loads every sheet into a dict of DataFrames.
    tabs = pd.read_excel(res.content, sheet_name=None)
    print("PCR Total People", file=f)
    print(tabs['Testing2 (Report Date)']['Molecular Total'].iloc[-1], "\n", file=f)
    df = tabs['TestingByDate (Test Date)'].filter(like="All Positive")
    print(df.sum(), file=f)
    print("\n\n", file=f)
    # weekly report (same index page as above)
    a = soup.find('a', string=re.compile("Weekly Public Health Report - Raw"))
    link = "https://www.mass.gov{}".format(a['href'])
    print("\nWeekly link = ", link, file=f)
    res = requests.get(link)
    df = pd.read_excel(BytesIO(res.content), sheet_name='Antibody', parse_dates=['Test Date'], index_col='Test Date')
    print(df.sum(), file=f)
    # ever hospitalized: totals from the most recent date in the sheet
    print('\nEver Hospitalized', "\n", file=f)
    max_date = tabs['RaceEthnicityLast2Weeks']['Date'].max()
    print(tabs['RaceEthnicityLast2Weeks'][tabs['RaceEthnicityLast2Weeks']['Date'] == max_date].sum(), file=f)
    print("\n\n", file=f)
# ME Hospitalizations
def me(f):
    """Write Maine hospitalization and testing totals to the open log file *f*."""
    print("Run at: ", datetime.now(tz('US/Eastern')), "\n", file=f)
    # First row / first four columns of the hospital-capacity CSV.
    print(pd.read_csv("https://gateway.maine.gov/dhhs-apps/mecdc_covid/hospital_capacity.csv", nrows=1, usecols=[0,1,2,3]).sum(), file=f)
    # Testing
    me = pd.read_csv('https://analytics.maine.gov/t/CDCExternal/views/covid-19-maine-cdc-dashboard/7_DailyLabResults.csv', thousands=',', parse_dates=['Day of Lab Received Date'])
    # Prefer 'Positive Tests'; fall back to the 'Flexible' column, then 0.
    me['Positive'] = me['Positive Tests'].fillna(me['Positive Tests Flexible'].fillna(0))
    print(me.pivot(index='Day of Lab Received Date', columns='Type', values=['All Tests', 'Positive']).sum(), file=f)
    print("\n\n", file=f)
# MI Testing
def mi(f):
    """Write Michigan diagnostic-test totals to the open log file *f*."""
    print("Run at: ", datetime.now(tz('US/Eastern')), "\n", file=f)
    url = 'https://www.michigan.gov/coronavirus/0,9753,7-406-98163_98173---,00.html'
    req = requests.get(url)
    soup = BeautifulSoup(req.text, 'html.parser')
    # Locate the daily spreadsheet link by its exact anchor text.
    a = soup.find('a', string="Diagnostic Tests by Result and County")
    mi_link = "https://www.michigan.gov/{}".format(a['href'])
    print("Link = ", mi_link, file=f)
    # Sum every column of the workbook except the county names.
    mi = pd.read_excel(mi_link).drop(columns=['COUNTY'])
    print(mi.sum(), file=f)
    print("\n\n", file=f)
# NC Antigen tests
def nc(f):
    """Write North Carolina molecular/antigen test totals to the open log file *f*."""
    print("Run at: ", datetime.now(tz('US/Eastern')), "\n", file=f)
    metrics = pd.read_csv("https://public.tableau.com/views/NCDHHS_COVID-19_DataDownload/DailyTestingMetrics.csv", parse_dates=['Date'], index_col='Date', thousands=',')
    wide = metrics.pivot(columns='Measure Names', values='Measure Values')
    # Fill gaps in 'Molecular Test' from 'Daily Tests Total', defaulting to 0.
    fallback = wide['Daily Tests Total'].fillna(0)
    wide['Molecular Test'] = wide['Molecular Test'].fillna(fallback)
    print(wide.sum().astype('int64'), file=f)
    print("\n\n", file=f)
# ND Negatives and Testing
def nd(f):
    """Write North Dakota negatives and testing totals to the open log file *f*."""
    print("Run at: ", datetime.now(tz('US/Eastern')), "\n", file=f)
    url = "https://static.dwcdn.net/data/NVwou.csv"
    # Browser User-Agent; presumably the Datawrapper CDN rejects the default
    # python-requests agent — TODO confirm.
    headers = {"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:79.0) Gecko/20100101 Firefox/79.0"}
    req = requests.get(url, headers=headers)
    print(pd.read_csv(StringIO(req.text)).filter(like='Negative').sum(), file=f)
    print("\nTesting Data", file=f)
    df = pd.read_csv('https://www.health.nd.gov/sites/www/files/documents/Files/MSS/coronavirus/charts-data/PublicUseData.csv')
    print(df.filter(like='tests').sum(), file=f)
    print("\n\n", file=f)
# OH testing
def oh(f):
    """Write Ohio daily testing totals to the open log file *f*."""
    print("Run at: ", datetime.now(tz('US/Eastern')), "\n", file=f)
    # Alternate "key metrics" download, kept for reference (was an unused
    # key_url variable):
    # https://data.ohio.gov/apigateway-secure/data-portal/download-file/cba54974-06ab-4ec8-92bc-62a83b40614e?key=2b4420ffc0c5885f7cd42a963cfda0b489a9a6dff49461e1a921b355ee0424c029cf4ff2ee80c8c82ef901d818d71f9def8cba3651f6595bd6a07e1477438b97bbc5d7ccf7b5b66c154779ce7a4f5b83
    testing_url = "https://data.ohio.gov/apigateway-secure/data-portal/download-file/2ad05e55-2b1a-486c-bc07-ecb3be682d29?key=e42285cfa9a0b157b3f1bdaadcac509c44db4cfa0f90735e12b770acb1307b918cee14d5d8e4d4187eb2cab71fc9233bda8ee3eed924b8a3fad33aaa6c8915fe6f3de6f82ad4b995c2359b168ed88fa9"
    # NOTE: mutates the global pandas display option for the rest of the run.
    pd.options.display.float_format = '{:.2f}'.format
    # The portal replies with a JSON envelope whose 'url' field is the CSV.
    print(pd.read_csv(requests.get(testing_url).json()['url']).filter(like='Daily').sum(), file=f)
    print("\n\n", file=f)
# TX Time, Testing, and ICU
def tx(f):
    """Write Texas ICU census, PCR/antigen/antibody positives, and the cases
    timestamp to the open log file *f*."""
    print("Run at: ", datetime.now(tz('US/Eastern')), "\n", file=f)
    url = 'https://www.dshs.texas.gov/coronavirus/TexasCOVID-19HospitalizationsOverTimebyTSA.xlsx'
    df = pd.read_excel(url, sheet_name='COVID-19 ICU', skiprows=2)
    print("ICU", file=f)
    # Row whose first column reads 'Total', latest (last) date column.
    print(df.loc[df[df.columns[0]] == 'Total'][df.columns[-1]], file=f)
    # PCR Positives: server computed sum(NewPositive) and sum(OldPositive);
    # add the two statistics together.
    res = requests.get('https://services5.arcgis.com/ACaLB9ifngzawspq/arcgis/rest/services/TX_DSHS_COVID19_TestData_Service/FeatureServer/6/query?where=1%3D1&outStatistics=%5B%7B%27statisticType%27%3A+%27sum%27%2C+%27onStatisticField%27%3A+%27NewPositive%27%7D%2C+%7B%27statisticType%27%3A+%27sum%27%2C+%27onStatisticField%27%3A+%27OldPositive%27%7D%5D&f=json')
    print("\nPCR Positives", file=f)
    print(sum(res.json()['features'][0]['attributes'].values()), file=f)
    res = requests.get('https://services5.arcgis.com/ACaLB9ifngzawspq/ArcGIS/rest/services/TX_DSHS_COVID19_Cases_Service/FeatureServer/2/query?where=1%3D1&outFields=%2A&orderByFields=Date+desc&resultRecordCount=1&f=json')
    print("\nCases Timestamp (as-of)", file=f)
    cases_date = datetime.fromtimestamp(res.json()['features'][0]['attributes']['Date']/1000)
    # convert the epoch value to TX time by subtracting a fixed 6 hours
    print(cases_date - timedelta(hours=6), file=f)
    # Antigen Positives
    res = requests.get('https://services5.arcgis.com/ACaLB9ifngzawspq/ArcGIS/rest/services/TX_DSHS_COVID19_TestData_Service/FeatureServer/3/query?where=1%3D1&objectIds=&time=&resultType=none&outFields=*&returnIdsOnly=false&returnUniqueIdsOnly=false&returnCountOnly=false&returnDistinctValues=false&cacheHint=false&orderByFields=&groupByFieldsForStatistics=&outStatistics=&having=&resultOffset=&resultRecordCount=&sqlFormat=none&f=json')
    print("\nAntigen Positives", file=f)
    # NOTE(review): features[5] / features[2] are positional — assumes the
    # service always returns rows in the same order. Confirm before relying on it.
    print(res.json()['features'][5]['attributes']['Count_'], file=f)
    # Antibody Positives
    print("\nAntibody Positives", file=f)
    print(res.json()['features'][2]['attributes']['Count_'], file=f)
    print("\n\n", file=f)
# UT Testing
def ut(f):
    """Write Utah testing metrics (tests and people tested, by type and
    result) to the open log file *f*.

    Downloads the dashboard's zipped CSV export and reports the two
    'Overview_*' files it contains.
    """
    print("Run at: ", datetime.now(tz('US/Eastern')), "\n", file=f)
    url = 'https://coronavirus-dashboard.utah.gov/Utah_COVID19_data.zip'
    res = requests.get(url)
    zipdata = BytesIO(res.content)
    # Fix: the archive variable used to shadow the builtin 'zip' and was
    # never closed; the context manager handles both.
    with zipfile.ZipFile(zipdata, 'r') as archive:
        for zf in archive.filelist:
            if zf.filename.startswith('Overview_Total Tests by Date'):
                # yay, the testing file
                title = 'Tests'
            elif zf.filename.startswith('Overview_Number of People Tested by Date'):
                title = 'People'
            else:
                title = None
            if title:
                title = "Metrics for {} (from {})".format(title, zf.filename)
                print(title, "\n"+"="*len(title), file=f)
                # Drop the daily columns (when present) so only cumulative
                # figures are summed.
                df = pd.read_csv(archive.open(zf.filename)).drop(columns=[' Total Daily Tests', 'Total Positive Tests', 'Daily People Tested', 'Daily Positive Tests'], errors="ignore")
                print(df.groupby(['Test Type', 'Result']).sum(), file=f)
                print(df.groupby('Test Type').sum(), file=f)
    print("\n\n", file=f)
# WA Testing
def wa(f):
    """Write Washington positive/negative test totals to the open log file *f*."""
    print("Run at: ", datetime.now(tz('US/Eastern')), "\n", file=f)
    wa_link = 'https://www.doh.wa.gov/Portals/1/Documents/1600/coronavirus/data-tables/PUBLIC_Tests_by_Specimen_Collection.xlsx'
    print("Link = ", wa_link, file=f)
    wa = pd.read_excel(wa_link, sheet_name = 'State').filter(regex='(Positive|Negative)').drop(columns='Positive tests (%)')
    # Collapse e.g. 'Positive tests' to 'Positive' so same-kind columns share a name.
    wa.columns = [x.split()[0] for x in wa.columns]
    # Sum same-named columns together, then total each group.
    # NOTE(review): groupby(axis=1) is deprecated in newer pandas — confirm
    # the pinned pandas version before upgrading.
    print(wa.groupby(wa.columns.values, axis=1).sum().sum(), file=f)
    print("\n\n", file=f)
# WI PCR Testing Encounters
def wi(f):
    """Write Wisconsin PCR testing-encounter totals to the open log file *f*."""
    print("Run at: ", datetime.now(tz('US/Eastern')), "\n", file=f)
    capacity = pd.read_csv("https://bi.wisconsin.gov/t/DHS/views/PercentPositivebyTestPersonandaComparisonandTestCapacity/TestCapacityDashboard.csv", thousands=",")
    daily_people = capacity.loc[capacity['Measure Names'] == 'Total people tested daily', 'Number of Tests']
    print("PCR Testing Encounters: " + str(daily_people.sum()), file=f)
    print("\n\n", file=f)
# WV Testing
def wv(f):
    """Write West Virginia lab-test totals to the open log file *f*."""
    print("Run at: ", datetime.now(tz('US/Eastern')).isoformat(), "\n", file=f)
    lab_tests = pd.read_csv("https://raw.githubusercontent.com/COVID19Tracking/covid19-datafetcher/data/wv_lab_tests.csv", thousands=",")
    # Sum every column except the date column.
    value_columns = [column for column in lab_tests.columns if column != 'date']
    print(lab_tests[value_columns].sum(axis=0), file=f)
    print("\n\n", file=f)
def main():
    """Run every state scraper, appending its output to a per-state log file.

    Each scraper runs inside its own try block so one failing state does not
    stop the rest of the run.
    """
    scraper_names = ['de', 'hi', 'ma', 'me', 'mi', 'nc', 'nd', 'oh', 'tx', 'ut', 'wa', 'wi', 'wv']
    for s in scraper_names:
        path = '../../data/long_calculations/' + s + '.txt'
        # Each scraper is a module-level function named after its state code.
        fun = globals()[s]
        with open(path, 'a+') as f:
            try:
                fun(f)
            # Fix: a bare 'except:' also swallowed SystemExit and
            # KeyboardInterrupt and discarded the error; catch Exception and
            # record what actually went wrong.
            except Exception as e:
                print("Encountered an error running at", datetime.now(tz('US/Eastern')), file=f)
                print(e, file=f)
if __name__ == "__main__":
    main()
| python | Apache-2.0 | a701975875f0dc4b98482854e9e02eb419b1fb14 | 2026-01-05T06:31:17.718705Z | false |
COVID19Tracking/covid-tracking-data | https://github.com/COVID19Tracking/covid-tracking-data/blob/a701975875f0dc4b98482854e9e02eb419b1fb14/data-collection-scripts/cdc-by-state/scrape.py | data-collection-scripts/cdc-by-state/scrape.py | """NOTE: this is meant to be run from the top-level repo directory.
"""
import json
import pandas as pd
import requests
# The 50 US states plus DC, as two-letter postal codes (no territories).
STATES = ["AL", "AK", "AZ", "AR", "CA", "CO", "CT", "DC", "DE", "FL", "GA",
          "HI", "ID", "IL", "IN", "IA", "KS", "KY", "LA", "ME", "MD",
          "MA", "MI", "MN", "MS", "MO", "MT", "NE", "NV", "NH", "NJ",
          "NM", "NY", "NC", "ND", "OH", "OK", "OR", "PA", "RI", "SC",
          "SD", "TN", "TX", "UT", "VT", "VA", "WA", "WV", "WI", "WY"]
def scrape():
    """Download the CDC integrated county timeseries for every state.

    Each state's JSON payload is flattened into a CSV under
    data/cdc_by_state/.
    """
    for state in STATES:
        print('Processing %s' % state)
        url = 'https://covid.cdc.gov/covid-data-tracker/COVIDData/getAjaxData?id=integrated_county_timeseries_state_%s_external' % state
        response = requests.get(url)
        # Fail loudly on HTTP errors instead of feeding an error page to the
        # JSON parser (json.loads(...text) used to hide the real cause).
        response.raise_for_status()
        df = pd.DataFrame(response.json()['integrated_county_timeseries_external_data'])
        path = 'data/cdc_by_state/%s_integrated_county_timeseries.csv' % state
        df.to_csv(path, index=False)
scrape()
| python | Apache-2.0 | a701975875f0dc4b98482854e9e02eb419b1fb14 | 2026-01-05T06:31:17.718705Z | false |
COVID19Tracking/covid-tracking-data | https://github.com/COVID19Tracking/covid-tracking-data/blob/a701975875f0dc4b98482854e9e02eb419b1fb14/data-collection-scripts/fda-covid-ivd-euas/scrape.py | data-collection-scripts/fda-covid-ivd-euas/scrape.py | import requests
import csv
import sys
from bs4 import BeautifulSoup
def scrape():
    """Scrape the FDA's COVID in-vitro-diagnostic EUA table to CSV on stdout.

    Progress/diagnostic messages go to stderr so stdout stays pure CSV.

    Raises:
        ValueError: if the page cannot be fetched or the target table is missing.
    """
    url = "https://www.fda.gov/emergency-preparedness-and-response/mcm-legal-regulatory-and-policy-framework/emergency-use-authorization#covidinvitrodev"
    sys.stderr.write("getting data from: %s\n" % url)
    page = requests.get(url)
    if page.status_code != 200:
        raise ValueError("failed to get the webpage, expected a 200, got %d" % page.status_code)
    soup = BeautifulSoup(page.text, 'html.parser')
    tables = soup.find_all('table')
    # Fix: find_all() returns a (possibly empty) ResultSet, never None, so
    # the original 'tables == None' check could never fire.
    if not tables:
        raise ValueError("failed to get any table from the page, maybe the url is wrong...")
    table = None
    # Keep the LAST table whose header row mentions "Diagnostic" (no break,
    # matching the original behavior).
    for t in tables:
        if "Diagnostic" in str(t.find_all('tr')[0]):
            table = t
    if table is None:
        raise ValueError("Could not find 'In Vitro Diagnostic Products' Table")
    rows = table.find_all('tr')[1:]
    table_headers = [
        'Date EUA First Issued',
        'Most recent authorization or revision',
        'Entity',
        'Diagnostic (Most Recent Letter of Authorization)',
        'Prduct attributes',  # sic — kept misspelled; downstream sheets may key on it
        'Authorized Setting',
        'Authorization Labeling',
        'Amendments and Other Documents',
        'Federal Register Notice for EUA',
        'Other Brand Name(s)'
    ]
    f = csv.writer(sys.stdout)
    f.writerow(table_headers)
    for row in rows:
        cell = []
        for td in row.find_all('td'):
            cell_text = td.text
            # Cells containing anchors are replaced by their absolute URLs.
            if td.a:
                cell_text = get_links(td)
            cell.append(cell_text)
        f.writerow(cell)
    sys.stderr.write("success !!\n")
def get_links(td):
    """Return the hrefs of every anchor in *td*, each prefixed with a newline
    and made absolute against www.fda.gov when relative."""
    parts = []
    for anchor in td.find_all('a'):
        href = anchor.get('href')
        if href.startswith("https"):
            parts.append("\n" + href)
        else:
            parts.append("\nhttps://www.fda.gov" + href)
    return "".join(parts)
# Runs on import/execution; this script has no __main__ guard.
scrape()
| python | Apache-2.0 | a701975875f0dc4b98482854e9e02eb419b1fb14 | 2026-01-05T06:31:17.718705Z | false |
LukePrior/nbn-upgrade-map | https://github.com/LukePrior/nbn-upgrade-map/blob/9bb664084224c03ec53dab555c80adde4d491466/tests/test_utils.py | tests/test_utils.py | import pytest
import testutils
import utils
def test_read_write_json_file():
    """Round-trip a nested dict through write_json_file/read_json_file."""
    test_data = {"a": 1, "b": 2, "c": {"d": 3, "e": 4}}
    utils.write_json_file("test.json", test_data)
    read_data = utils.read_json_file("test.json")
    assert test_data == read_data
    # A missing file with empty_if_missing=True yields an empty dict.
    missing_file = utils.read_json_file("xtest.json", empty_if_missing=True)
    assert missing_file == {}
@pytest.mark.skip(reason="waiting for https://github.com/LukePrior/nbn-upgrade-map/pull/177")
def test_minimised_json():
    """indent=0 should produce fully minimised JSON (no whitespace)."""
    test_data = {"a": 1, "b": 2, "c": {"d": 3, "e": 4}}
    utils.write_json_file("test.json", test_data, indent=0)
    s = testutils.read_file_string("test.json")
    assert s == '{"a":1,"b":2,"c":{"d":3,"e":4}}'
def test_progress_bar(capsys):
    """print_progress_bar emits the prefix each call and both endpoint percentages."""
    utils.print_progress_bar(0, 100, prefix="Progress:", suffix="Complete", length=50)
    utils.print_progress_bar(25, 100, prefix="Progress:", suffix="Complete", length=50)
    utils.print_progress_bar(100, 100, prefix="Progress:", suffix="Complete", length=50)
    captured = capsys.readouterr()
    assert captured.out.count("Progress:") == 3
    assert "0.0%" in captured.out
    assert "100.0%" in captured.out
| python | MIT | 9bb664084224c03ec53dab555c80adde4d491466 | 2026-01-05T06:50:31.043193Z | false |
LukePrior/nbn-upgrade-map | https://github.com/LukePrior/nbn-upgrade-map/blob/9bb664084224c03ec53dab555c80adde4d491466/tests/test_combined_suburbs.py | tests/test_combined_suburbs.py | import os
from datetime import datetime
import main
import pytest
import suburbs
import testutils
def _dummy_read_json_file_combined_suburbs(filename: str) -> dict:
    """Fake combined-suburbs.json file."""
    if filename == "results/combined-suburbs.json":
        return testutils.read_test_data_json("combined-suburbs.json")
    # Any other path being read is a test bug — fail loudly.
    raise NotImplementedError
def test_select_suburb(monkeypatch):
    """Test main.select_suburb()."""
    monkeypatch.setattr("utils.read_json_file", _dummy_read_json_file_combined_suburbs)
    # test explicit suburb: the generator yields only the requested suburb
    selector = main.select_suburb("Acton", "ACT")
    assert next(selector)[0] == "ACTON" # unprocessed 1
    with pytest.raises(StopIteration):
        next(selector)
    # test select order: unprocessed suburbs first, then stale announced,
    # then stale unannounced
    selector = main.select_suburb(None, None)
    assert next(selector)[0] == "ACTON" # unprocessed 1
    assert next(selector)[0] == "AMAROO" # unprocessed 2
    assert next(selector)[0] == "ARANDA" # old announced
    assert next(selector)[0] == "AINSLIE" # old unannounced
    with pytest.raises(StopIteration):
        next(selector)
def test_write_suburbs(monkeypatch):
    """Test suburbs.write_all_suburbs()."""
    # Capture writes keyed by filename instead of touching the filesystem.
    SAVED_JSON = {}
    def dummy_write_json_file(filename: str, data: dict, indent=4):
        SAVED_JSON[filename] = data
    monkeypatch.setattr("utils.read_json_file", _dummy_read_json_file_combined_suburbs)
    monkeypatch.setattr("suburbs.utils.write_json_file", dummy_write_json_file)
    all_suburbs = suburbs.read_all_suburbs()
    suburbs.write_all_suburbs(all_suburbs)
    assert len(SAVED_JSON) == 1, "Should only be one file"
    states = SAVED_JSON["results/combined-suburbs.json"]
    assert len(states) == 1, "Should only be one state"
    assert len(states["ACT"]) == 4, "Should be 4 suburbs in ACT"
def test_suburb_data(monkeypatch):
    """Suburb records expose internal (uppercase) and file (lowercase) names."""
    monkeypatch.setattr("utils.read_json_file", _dummy_read_json_file_combined_suburbs)
    all_suburbs = suburbs.read_all_suburbs()
    assert all_suburbs["ACT"][0].internal == "ACTON"
    assert all_suburbs["ACT"][0].file == "acton"
    # Distinct suburbs must not compare equal.
    assert all_suburbs["ACT"][0] != all_suburbs["ACT"][1]
def test_get_suburb_progress(monkeypatch):
    """Test suburbs.get_suburb_progress()."""
    monkeypatch.setattr("utils.read_json_file", _dummy_read_json_file_combined_suburbs)
    progress = suburbs.get_suburb_progress()
    # Fixture has 4 ACT suburbs, 2 of them processed.
    assert progress["all"]["ACT"] == {"done": 2, "percent": 50.0, "total": 4}
def test_get_address_progress(monkeypatch):
    """Test suburbs.get_address_progress()."""
    monkeypatch.setattr("utils.read_json_file", _dummy_read_json_file_combined_suburbs)
    progress = suburbs.get_address_progress()
    # Address counts aggregated from the fixture's 4 suburbs.
    assert progress["all"]["TOTAL"] == {"done": 3670, "percent": 57.8, "total": 6354}
def test_update_progress(monkeypatch):
    """update_progress() writes progress.json and returns the suburb stats."""
    SAVED_JSON = {}
    def dummy_write_json_file(filename: str, data: dict, indent=4):
        SAVED_JSON[filename] = data
    monkeypatch.setattr("utils.read_json_file", _dummy_read_json_file_combined_suburbs)
    monkeypatch.setattr("suburbs.utils.write_json_file", dummy_write_json_file)
    results = suburbs.update_progress()
    assert results is not None
    # The return value is the same object written under the "suburbs" key.
    assert results == SAVED_JSON["results/progress.json"]["suburbs"]
    assert len(SAVED_JSON) == 1, "Should only be one file"
    progress = SAVED_JSON["results/progress.json"]
    assert progress["suburbs"]["all"]["TOTAL"]["done"] == 2
    assert progress["suburbs"]["all"]["TOTAL"]["total"] == 4
    assert progress["suburbs"]["all"]["TOTAL"]["percent"] == 50.0
    assert progress["addresses"]["all"]["TOTAL"]["done"] == 3670
    assert progress["addresses"]["all"]["TOTAL"]["total"] == 6354
    assert progress["addresses"]["all"]["TOTAL"]["percent"] == 57.8
def test_update_processed_dates(monkeypatch):
    """update_processed_dates() copies each GeoJSON's generated timestamp and
    feature count back into combined-suburbs.json."""
    SAVED_JSON = {}
    def _dummy_glob(pathname, *, root_dir=None, dir_fd=None, recursive=False):
        # Pretend only one result file exists on disk.
        if pathname == "results/ACT/*.geojson":
            dir_path = os.path.dirname(os.path.realpath(__file__))
            return [f"{dir_path}/data/acton.geojson"] # acton, 2023-07-07T03:54:25.154530
        return []
    def dummy_write_json_file(filename: str, data: dict, indent=4):
        SAVED_JSON[filename] = data
    def _dummy_read_json_file(filename: str) -> dict:
        """Fake combined-suburbs.json and geojson files."""
        if filename == "results/combined-suburbs.json":
            return testutils.read_test_data_json("combined-suburbs.json")
        elif filename.endswith("acton.geojson"):
            return testutils.read_test_data_json("sample2.geojson")
        raise NotImplementedError
    monkeypatch.setattr("utils.read_json_file", _dummy_read_json_file)
    monkeypatch.setattr("suburbs.utils.read_json_file", _dummy_read_json_file)
    monkeypatch.setattr("suburbs.glob.glob", _dummy_glob)
    monkeypatch.setattr("suburbs.utils.write_json_file", dummy_write_json_file)
    suburbs.update_processed_dates()
    assert len(SAVED_JSON) == 1, "Should only be one file"
    acton_suburb = SAVED_JSON["results/combined-suburbs.json"]["ACT"][0]
    assert acton_suburb["processed_date"] == "2023-07-07T03:54:25.154530"
    # Verify that address_count is updated to match the GeoJSON features count
    assert acton_suburb["address_count"] == 3 # sample2.geojson has 3 features
def test_update_suburb_in_all_suburbs(monkeypatch):
    """update_suburb_in_all_suburbs() refreshes one suburb's processed date
    and address count, both when the GeoJSON exists and when it is missing."""
    SAVED_JSON = {}
    def dummy_write_json_file(filename: str, data: dict, indent=4):
        SAVED_JSON[filename] = data
    def dummy_get_geojson_file_generated(filename) -> datetime:
        assert filename == "results/ACT/acton.geojson"
        return datetime(2023, 7, 7, 3, 54, 25, 154530)
    def dummy_get_geojson_file_generated_none(filename) -> datetime:
        assert filename == "results/ACT/acton.geojson"
        return None # simulate no file
    def dummy_read_geojson_file(suburb: str, state: str) -> dict:
        if suburb.lower() == "acton" and state.upper() == "ACT":
            return testutils.read_test_data_json("sample2.geojson")
        return None
    def dummy_read_geojson_file_none(suburb: str, state: str) -> dict:
        return None # simulate file doesn't exist
    monkeypatch.setattr("utils.read_json_file", _dummy_read_json_file_combined_suburbs)
    monkeypatch.setattr("suburbs.utils.write_json_file", dummy_write_json_file)
    monkeypatch.setattr("geojson.get_geojson_file_generated", dummy_get_geojson_file_generated)
    monkeypatch.setattr("suburbs.read_geojson_file", dummy_read_geojson_file)
    # Case 1: GeoJSON present — timestamp and count come from the file.
    suburbs.update_suburb_in_all_suburbs("ACTON", "ACT")
    assert len(SAVED_JSON) == 2, "progress and combined-suburbs should be written"
    acton_suburb = SAVED_JSON["results/combined-suburbs.json"]["ACT"][0]
    assert acton_suburb["name"] == "Acton"
    assert acton_suburb["processed_date"] == "2023-07-07T03:54:25.154530"
    # Verify that address_count is updated to match the GeoJSON features count
    assert acton_suburb["address_count"] == 3 # sample2.geojson has 3 features
    # Case 2: GeoJSON missing — processed date falls back to "now", count to 0.
    monkeypatch.setattr("geojson.get_geojson_file_generated", dummy_get_geojson_file_generated_none)
    monkeypatch.setattr("suburbs.read_geojson_file", dummy_read_geojson_file_none)
    suburbs.update_suburb_in_all_suburbs("ACTON", "ACT")
    assert len(SAVED_JSON) == 2, "progress and combined-suburbs should be written"
    acton_suburb = SAVED_JSON["results/combined-suburbs.json"]["ACT"][0]
    assert acton_suburb["name"] == "Acton"
    assert datetime.fromisoformat(acton_suburb["processed_date"]).date() == datetime.now().date()
    # When GeoJSON file doesn't exist, address_count should be set to 0
    assert acton_suburb["address_count"] == 0
def test_get_technology_breakdown(monkeypatch):
    """get_technology_breakdown() sums tech counts across every suburb file."""
    def _dummy_read_json_file(filename: str) -> dict:
        if filename == "results/combined-suburbs.json":
            return testutils.read_test_data_json("combined-suburbs.json") # four ACT suburbs
        elif filename.startswith("results/ACT/"):
            return testutils.read_test_data_json("sample2.geojson") # two FTTP, one FTTN
        raise NotImplementedError
    monkeypatch.setattr("utils.read_json_file", _dummy_read_json_file)
    monkeypatch.setattr("geojson.read_json_file", _dummy_read_json_file)
    monkeypatch.setattr("geojson.os.path.exists", lambda x: True)
    breakdown = suburbs.get_technology_breakdown()
    # 4 suburbs x the same fixture file; counts below reflect that aggregation.
    assert breakdown["ACT"]["FTTP"] == 8
    assert breakdown["ACT"]["FTTN"] == 4
    assert breakdown["ACT"]["total"] == 12
    assert breakdown["TOTAL"]["total"] == 12
| python | MIT | 9bb664084224c03ec53dab555c80adde4d491466 | 2026-01-05T06:50:31.043193Z | false |
LukePrior/nbn-upgrade-map | https://github.com/LukePrior/nbn-upgrade-map/blob/9bb664084224c03ec53dab555c80adde4d491466/tests/test_geojson.py | tests/test_geojson.py | import datetime
import os
import geojson
import utils
from data import Address
def test_read_geojson(monkeypatch):
    """read_geojson_file returns the raw FeatureCollection, and
    read_geojson_file_addresses converts it to Address records plus the
    generated timestamp."""
    dir_path = os.path.dirname(os.path.realpath(__file__))
    monkeypatch.setattr("geojson.os.path.exists", lambda path: True)
    monkeypatch.setattr(
        "geojson.read_json_file", lambda filename: utils.read_json_file(f"{dir_path}/data/sample2.geojson")
    )
    stuff = geojson.read_geojson_file("MyTown", "ABC")
    assert stuff is not None
    assert stuff["type"] == "FeatureCollection"
    assert stuff["suburb"] == "ACTON"
    addresses, generated = geojson.read_geojson_file_addresses("MyTown", "ABC")
    assert generated == datetime.datetime.fromisoformat("2023-07-07T03:54:25.154530")
    assert addresses is not None
    assert len(addresses) == 3
    # Spot-check the first feature's full Address mapping.
    assert addresses[0] == Address(
        name="21 MCCOY CIRCUIT ACTON 2601",
        gnaf_pid="GAACT714876373",
        longitude=149.12072415,
        latitude=-35.28414781,
        loc_id="ChIJBXWXMEdNFmsRoN6pR5X8gC4",
        tech="FTTP",
        upgrade="UNKNOWN",
    )
def test_write_geojson(monkeypatch):
    """Writing GeoJSON filters incomplete addresses and stamps a generated date."""
    SAVED_JSON = {}

    def dummy_write_json_file(filename: str, data: dict, indent=4):
        # Capture what would be written to disk, keyed by filename.
        SAVED_JSON[filename] = data

    monkeypatch.setattr("geojson.write_json_file", dummy_write_json_file)
    monkeypatch.setattr("geojson.os.makedirs", lambda name, mode=0o777, exist_ok=False: None)
    # Addresses 3 and 4 are each missing tech or upgrade and must be dropped.
    addresses = [
        Address(name="1 Fake St", gnaf_pid="GNAF123", longitude=123.456, latitude=-12.345, upgrade="XYZ", tech="FTTP"),
        Address(name="2 Fake St", gnaf_pid="GNAF456", longitude=123.456, latitude=-12.345, upgrade="ABC", tech="FTTN"),
        Address(name="3 Fake St", gnaf_pid="GNAF789", longitude=123.456, latitude=-12.345, upgrade="ABC"),
        Address(name="4 Fake St", gnaf_pid="GNAF007", longitude=123.456, latitude=-12.345, tech="ABC"),
        Address(
            name="5 Fake St",
            gnaf_pid="GNAF808",
            longitude=123.456,
            latitude=-12.345,
            upgrade="ABC",
            tech="FTTN",
            tech_change_status="Committed",
            program_type="On-Demand N2P SDU/MDU Simple",
            target_eligibility_quarter="Jun 2024",
        ),
    ]
    generated = datetime.datetime.now() - datetime.timedelta(days=1)
    geojson.write_geojson_file("MyTown", "ABC", addresses, generated)
    info = SAVED_JSON["results/ABC/mytown.geojson"]
    assert info["type"] == "FeatureCollection"
    assert info["suburb"] == "MyTown"
    assert info["generated"] == generated.isoformat()
    assert len(info["features"]) == 3, "addresses with no tech or upgrade should not be included"
    assert info["features"][0]["type"] == "Feature"
    assert info["features"][0]["properties"]["upgrade"] == "XYZ"
    assert info["features"][0]["properties"]["tech"] == "FTTP"
    # Optional upgrade-program keys are omitted when the Address lacks them.
    assert "tech_change_status" not in info["features"][0]["properties"]
    assert "program_type" not in info["features"][0]["properties"]
    assert "target_eligibility_quarter" not in info["features"][0]["properties"]
    assert info["features"][2]["properties"]["tech_change_status"] == "Committed"
    assert info["features"][2]["properties"]["program_type"] == "On-Demand N2P SDU/MDU Simple"
    assert info["features"][2]["properties"]["target_eligibility_quarter"] == "Jun 2024"
    # Without an explicit generated date, "now" should be used.
    geojson.write_geojson_file("MyTown", "ABC", addresses)
    info = SAVED_JSON["results/ABC/mytown.geojson"]
    was_generated = datetime.datetime.fromisoformat(info["generated"])
    assert was_generated - datetime.datetime.now() < datetime.timedelta(seconds=5)
def test_geojson_generated(monkeypatch):
    """The generated-date reader works for both the fast partial read and full fallback."""
    dir_path = os.path.dirname(os.path.realpath(__file__))
    # check date at top (partial read)
    generated = geojson.get_geojson_file_generated(f"{dir_path}/data/sample1.geojson")
    assert generated is not None
    assert generated.date() == datetime.date(2023, 7, 7)
    # check date at bottom (full read)
    generated = geojson.get_geojson_file_generated(f"{dir_path}/data/sample2.geojson")
    assert generated is not None
    assert generated.date() == datetime.date(2023, 7, 7)
    # check date at bottom with incomplete top-few JSON (full read)
    generated = geojson.get_geojson_file_generated(f"{dir_path}/data/sample3.geojson")
    assert generated is not None
    assert generated.date() == datetime.date(2023, 7, 7)
    # check generated name path
    dir_path = os.path.dirname(os.path.realpath(__file__))
    monkeypatch.setattr("geojson.get_geojson_filename", lambda suburb, state: f"{dir_path}/data/sample2.geojson")
    generated = geojson.get_geojson_file_generated_from_name("MyTown", "ABC")
    assert generated is not None
| python | MIT | 9bb664084224c03ec53dab555c80adde4d491466 | 2026-01-05T06:50:31.043193Z | false |
LukePrior/nbn-upgrade-map | https://github.com/LukePrior/nbn-upgrade-map/blob/9bb664084224c03ec53dab555c80adde4d491466/tests/test_main.py | tests/test_main.py | import copy
import datetime
import main
import test_nbn
import testutils
import update_breakdown
from data import Address
from nbn import CACHE, NBNApi
def get_nbn_data_json(self, url) -> dict:
    """Serve canned NBN API payloads from the test-data directory.

    Monkeypatched in place of NBNApi.get_nbn_data_json so tests never
    hit the live API; unrecognised URLs raise NotImplementedError.
    """
    autocomplete_prefix = "https://places.nbnco.net.au/places/v1/autocomplete?query="
    details_prefix = "https://places.nbnco.net.au/places/v2/details/"
    if url.startswith(autocomplete_prefix):
        key = url.split("=")[-1]
        return testutils.read_test_data_json(f"nbn/query.{key}.json")
    if url.startswith(details_prefix):
        key = url.split("/")[-1]
        return testutils.read_test_data_json(f"nbn/details.{key}.json")
    raise NotImplementedError
def test_get_address(monkeypatch):
    """get_address resolves the loc_id; tech/upgrade only fill in when get_status=True."""
    monkeypatch.setattr("nbn.NBNApi.get_nbn_data_json", get_nbn_data_json)
    CACHE.clear()
    nbn = NBNApi()
    # NOTE(review): longitude/latitude values appear swapped (-27.5 looks like a
    # latitude) — matches the canned fixture, confirm whether the mock ignores them.
    address = Address(
        name="1 BLUEGUM RISE ANSTEAD 4070",
        gnaf_pid="GAQLD425035994",
        longitude=-27.56300033,
        latitude=152.85904758,
    )
    out_address = main.get_address(nbn, copy.copy(address), get_status=False)
    assert out_address.name == address.name
    assert out_address.loc_id == "LOC000126303452"
    assert out_address.tech is None
    assert out_address.upgrade is None
    out_address = main.get_address(nbn, copy.copy(address), get_status=True)
    assert out_address.loc_id == "LOC000126303452"
    assert out_address.tech == "FTTN"
    assert out_address.upgrade == "FTTP_SA"
def test_remove_duplicate_addresses():
    """Deduplication keeps the first occurrence and preserves order."""
    addresses = [
        Address(name=f"{n} Fake St", gnaf_pid=f"GNAF00{n}", longitude=123.456, latitude=-12.345, loc_id=str(n))
        for n in range(5)
    ]
    # Append a copy of the first entry to create one duplicate.
    addresses.append(copy.copy(addresses[0]))
    new_addresses = main.remove_duplicate_addresses(addresses)
    assert len(addresses) == 6
    assert len(new_addresses) == 5
    assert [a.loc_id for a in new_addresses] == [str(n) for n in range(5)]
def test_update_breakdown(monkeypatch):
    """End-to-end breakdown update over canned ACT suburb GeoJSON files.

    All file I/O is stubbed: reads are served from test data and writes are
    captured in SAVED_JSON.
    """
    SAVED_JSON = {}

    def _dummy_read_json_file(filename: str, empty_if_missing=False) -> dict:
        if filename == "results/breakdown.json" and empty_if_missing:
            return {}
        elif filename == "results/breakdown-suburbs.json" and empty_if_missing:
            return {}
        elif filename == "results/combined-suburbs.json":
            return testutils.read_test_data_json("combined-suburbs.json")  # four ACT suburbs
        elif filename.startswith("results/ACT/"):
            return testutils.read_test_data_json("sample2.geojson")  # two FTTP, one FTTN
        # BUGFIX: interpolate the actual filename (was a literal "(unknown)"
        # placeholder, which made unexpected reads impossible to diagnose).
        raise NotImplementedError(f"Unexpected filename: {filename}")

    def _dummy_write_json_file(filename: str, data: dict, indent=4):
        # Capture writes instead of touching the filesystem.
        SAVED_JSON[filename] = data

    def _dummy_glob(pathname, *, root_dir=None, dir_fd=None, recursive=False):
        return ["results/ACT/acton.geojson", "results/ACT/braddon.geojson"]

    monkeypatch.setattr("utils.read_json_file", _dummy_read_json_file)
    monkeypatch.setattr("utils.write_json_file", _dummy_write_json_file)
    monkeypatch.setattr("adhoc_tools.glob.glob", _dummy_glob)
    bd = update_breakdown.update_breakdown()
    date_key = datetime.datetime.now().date().isoformat()
    assert len(bd) == 1
    assert date_key in bd
    assert len(bd[date_key]) == 2
    # Two files, each with two FTTP and one FTTN feature.
    assert bd[date_key]["tech"]["FTTP"] == 4
    assert bd[date_key]["tech"]["FTTN"] == 2
    assert bd[date_key]["upgrade"]["NULL_NA"] == 2
    update_breakdown.print_breakdowns(bd)
    # TODO: check output?
def test_update_breakdown_rerun(monkeypatch):
    """Re-running on the same day keeps the existing entry untouched."""
    date_key = datetime.datetime.now().date().isoformat()
    dummy_value = "DUMMY_VALUE"

    def _dummy_read_json_file(filename: str, empty_if_missing=False) -> dict:
        if filename == "results/breakdown.json" and empty_if_missing:
            return {date_key: dummy_value}
        elif filename == "results/breakdown-suburbs.json" and empty_if_missing:
            return {date_key: dummy_value}
        # BUGFIX: interpolate the actual filename (was a literal "(unknown)"
        # placeholder left in the f-string).
        raise NotImplementedError(f"Unexpected filename: {filename}")

    monkeypatch.setattr("utils.read_json_file", _dummy_read_json_file)
    bd = update_breakdown.update_breakdown()
    assert len(bd) == 1
    assert bd[date_key] == dummy_value
def test_nbn_to_data(monkeypatch):
    """NBN details (incl. upgrade-program fields) flow through to the Address."""
    # Stub the HTTP layer itself, exercising NBNApi's response handling.
    monkeypatch.setattr("nbn.requests.Session.get", test_nbn.requests_session_get)
    nbn = NBNApi()
    # test uncached
    CACHE.clear()
    address = Address(
        name="1 BLUEGUM RISE ANSTEAD 4070",
        gnaf_pid="GAQLD425035994",
        longitude=-27.56300033,
        latitude=152.85904758,
    )
    out_address = main.get_address(nbn, copy.copy(address), get_status=True)
    assert out_address.loc_id == "LOC000126303452"
    assert out_address.tech == "FTTN"
    assert out_address.tech_change_status == "Committed"
    assert out_address.program_type == "On-Demand N2P SDU/MDU Simple"
    assert out_address.target_eligibility_quarter == "Jun 2024"
def test_fttp_address_caching(monkeypatch):
    """Test that FTTP addresses from cache are not re-fetched from API"""
    monkeypatch.setattr("nbn.NBNApi.get_nbn_data_json", get_nbn_data_json)
    # Clear caches
    CACHE.clear()
    main.GNAF_PID_TO_FTTP_ADDRESS = {}
    nbn = NBNApi()
    # Create an FTTP address to cache
    fttp_address = Address(
        name="1 TEST STREET TEST SUBURB 4000",
        gnaf_pid="GAQLD999999999",
        longitude=152.85905364,
        latitude=-27.56298776,
        loc_id="LOC000999999999",
        tech="FTTP",
        upgrade="NULL_NA",
    )
    # Cache the FTTP address
    main.GNAF_PID_TO_FTTP_ADDRESS["GAQLD999999999"] = fttp_address
    # Create a new address with same gnaf_pid but without tech info
    test_address = Address(
        name="1 TEST STREET TEST SUBURB 4000",
        gnaf_pid="GAQLD999999999",
        longitude=152.85905364,
        latitude=-27.56298776,
    )
    # Track if API was called
    api_calls = []
    original_get_details = nbn.get_nbn_loc_details

    def track_api_calls(loc_id):
        # Record the loc_id, then delegate to the real (mock-backed) lookup.
        api_calls.append(loc_id)
        return original_get_details(loc_id)

    monkeypatch.setattr(nbn, "get_nbn_loc_details", track_api_calls)
    # Process the address
    result = main.get_address(nbn, copy.copy(test_address), get_status=True)
    # Verify cached data was used
    assert result.tech == "FTTP"
    assert result.upgrade == "NULL_NA"
    assert result.loc_id == "LOC000999999999"
    # Verify API was NOT called (FTTP addresses should skip API calls)
    assert len(api_calls) == 0, "API should not be called for cached FTTP addresses"
def test_non_fttp_address_still_fetched(monkeypatch):
    """Test that non-FTTP addresses are still fetched from API even if cache exists"""
    monkeypatch.setattr("nbn.NBNApi.get_nbn_data_json", get_nbn_data_json)
    # Clear caches
    CACHE.clear()
    main.GNAF_PID_TO_FTTP_ADDRESS = {}
    nbn = NBNApi()
    # Create an FTTN address (non-FTTP) - should NOT be cached
    address = Address(
        name="1 BLUEGUM RISE ANSTEAD 4070",
        gnaf_pid="GAQLD425035994",
        longitude=-27.56300033,
        latitude=152.85904758,
    )
    # Track if API was called
    api_calls = []
    original_get_details = nbn.get_nbn_loc_details

    def track_api_calls(loc_id):
        # Record the loc_id, then delegate to the real (mock-backed) lookup.
        api_calls.append(loc_id)
        return original_get_details(loc_id)

    monkeypatch.setattr(nbn, "get_nbn_loc_details", track_api_calls)
    # Process the address
    result = main.get_address(nbn, copy.copy(address), get_status=True)
    # Verify API WAS called (non-FTTP addresses should still fetch from API)
    assert len(api_calls) == 1, "API should be called for non-FTTP addresses"
    assert result.tech == "FTTN"
    assert result.upgrade == "FTTP_SA"
| python | MIT | 9bb664084224c03ec53dab555c80adde4d491466 | 2026-01-05T06:50:31.043193Z | false |
LukePrior/nbn-upgrade-map | https://github.com/LukePrior/nbn-upgrade-map/blob/9bb664084224c03ec53dab555c80adde4d491466/tests/test_db.py | tests/test_db.py | import os
from argparse import ArgumentParser, Namespace
import db
SAMPLE_ADDRESSES_DB_FILE = f"{os.path.dirname(os.path.realpath(__file__))}/data/sample-addresses.sqlite"
def test_get_address():
    """Addresses for a suburb/state come back from the sample SQLite DB."""
    address_db = db.connect_to_db(Namespace(dbhost=SAMPLE_ADDRESSES_DB_FILE))
    addresses = address_db.get_addresses("SOMERVILLE", "VIC")
    assert len(addresses) == 30
    assert addresses[0].name == "83 GUELPH STREET SOMERVILLE 3912"
    assert addresses[0].gnaf_pid == "GAVIC421048228"
def test_get_counts_by_suburb():
    """Per-suburb address counts are grouped by state."""
    address_db = db.connect_to_db(Namespace(dbhost=SAMPLE_ADDRESSES_DB_FILE))
    counts = address_db.get_counts_by_suburb()
    assert counts["VIC"]["SOMERVILLE"] == 30
    assert counts["VIC"]["SOMERS"] == 10
    assert counts["VIC"]["SOMERTON"] == 1
    assert len(counts["NSW"]) == 2
    assert len(counts["SA"]) == 1
    assert len(counts["TAS"]) == 1
    assert len(counts["WA"]) == 1
def test_get_extents_by_suburb():
    """Suburb extents are returned as ((min_lat, min_lng), (max_lat, max_lng))."""
    address_db = db.connect_to_db(Namespace(dbhost=SAMPLE_ADDRESSES_DB_FILE))
    extents = address_db.get_extents_by_suburb()
    assert extents["VIC"]["SOMERVILLE"] == (
        (-38.23846838, 145.162399),
        (-38.21306546, 145.22678832),
    )
def test_add_db_arguments():
    """The DB argument group installs the expected defaults."""
    parser = ArgumentParser()
    db.add_db_arguments(parser)
    # Parse with no CLI args to exercise only the defaults.
    args = parser.parse_args([])
    assert args.dbuser == "postgres"
    assert args.dbpassword == "password"
    assert args.dbhost == "localhost"
    assert args.dbport == "5433"
    assert args.create_index
# TODO: test postgres with mocks
| python | MIT | 9bb664084224c03ec53dab555c80adde4d491466 | 2026-01-05T06:50:31.043193Z | false |
LukePrior/nbn-upgrade-map | https://github.com/LukePrior/nbn-upgrade-map/blob/9bb664084224c03ec53dab555c80adde4d491466/tests/test_adhoc_tools.py | tests/test_adhoc_tools.py | import datetime
from argparse import Namespace
import adhoc_tools
import testutils
from test_db import SAMPLE_ADDRESSES_DB_FILE
def test_check_processing_rate(monkeypatch):
    """Check the reporting function"""
    # Serve the canned combined-suburbs file for any JSON read.
    monkeypatch.setattr(
        "utils.read_json_file",
        lambda filename: testutils.read_test_data_json("combined-suburbs.json"),
    )
    data = adhoc_tools.check_processing_rate()
    # Two dated rows plus a trailing TOTAL row.
    assert len(data) == 3
    assert data[0] == (datetime.date(2021, 7, 7), 1)
    assert data[1] == (datetime.date(2022, 8, 2), 1)
    assert data[-1] == ("TOTAL", 2)
def test_get_db_suburb_list():
    """The DB suburb list covers all five states in the sample database."""
    args = Namespace(dbhost=SAMPLE_ADDRESSES_DB_FILE)
    suburbs = adhoc_tools.get_db_suburb_list(args)
    assert len(suburbs) == 5
    assert len(suburbs["NSW"]) == 2
def test_add_address_count_to_suburbs(monkeypatch):
    """DB address counts are merged into the combined-suburbs JSON."""
    monkeypatch.setattr(
        "utils.read_json_file",
        lambda filename: testutils.read_test_data_json("combined-suburbs-somer.json"),
    )
    SAVED_JSON = {}

    def dummy_write_json_file(filename: str, data: dict, indent=4):
        # Capture writes instead of touching the filesystem.
        SAVED_JSON[filename] = data

    monkeypatch.setattr("suburbs.utils.write_json_file", dummy_write_json_file)
    # DB has 8 suburbs (like "SOMER") across 5 states
    # all-suburbs has three of these with different (big) address-counts
    args = Namespace(dbhost=SAMPLE_ADDRESSES_DB_FILE)
    adhoc_tools.add_address_count_to_suburbs(args)
    assert len(SAVED_JSON) == 1
    assert "results/combined-suburbs.json" in SAVED_JSON
    nsw_suburbs = {s["name"]: s for s in SAVED_JSON["results/combined-suburbs.json"]["NSW"]}
    assert nsw_suburbs["Somersby"]["address_count"] == 5
    assert nsw_suburbs["Somerton"]["address_count"] == 1
    sa_suburbs = {s["name"]: s for s in SAVED_JSON["results/combined-suburbs.json"]["SA"]}
    assert sa_suburbs["Somerton Park"]["address_count"] == 20
def test_update_suburb_dates(monkeypatch):
monkeypatch.setattr("adhoc_tools.get_nbn_suburb_dates", lambda: testutils.read_test_data_json("suburb-dates.json"))
monkeypatch.setattr("utils.read_json_file", lambda filename: testutils.read_test_data_json("combined-suburbs.json"))
SAVED_JSON = {}
def dummy_write_json_file(filename: str, data: dict, indent=4):
SAVED_JSON[filename] = data
monkeypatch.setattr("suburbs.utils.write_json_file", dummy_write_json_file)
| python | MIT | 9bb664084224c03ec53dab555c80adde4d491466 | 2026-01-05T06:50:31.043193Z | false |
LukePrior/nbn-upgrade-map | https://github.com/LukePrior/nbn-upgrade-map/blob/9bb664084224c03ec53dab555c80adde4d491466/tests/testutils.py | tests/testutils.py | import json
import os
def read_file_string(filename: str) -> str:
    """Return the entire contents of *filename* as one string."""
    with open(filename) as handle:
        contents = handle.read()
    return contents
def get_test_data_path(filename: str) -> str:
    """Get the full path to a test data file.

    Resolves *filename* relative to the ``data`` directory that sits next
    to this module, so tests work regardless of the working directory.
    """
    # BUGFIX: interpolate the requested filename (the f-string previously
    # contained a literal "(unknown)" placeholder, breaking every lookup).
    return f"{os.path.dirname(os.path.realpath(__file__))}/data/{filename}"
def read_test_data_file(filename: str) -> str:
    """Return the text contents of a file from the test data directory."""
    path = get_test_data_path(filename)
    with open(path, encoding="utf-8") as handle:
        return handle.read()
def read_test_data_json(filename: str) -> dict:
    """Parse a file from the test data directory as JSON and return it."""
    path = get_test_data_path(filename)
    with open(path, "r", encoding="utf-8") as handle:
        return json.load(handle)
| python | MIT | 9bb664084224c03ec53dab555c80adde4d491466 | 2026-01-05T06:50:31.043193Z | false |
LukePrior/nbn-upgrade-map | https://github.com/LukePrior/nbn-upgrade-map/blob/9bb664084224c03ec53dab555c80adde4d491466/tests/test_nbn.py | tests/test_nbn.py | import testutils
from nbn import CACHE, NBNApi
class JSONWrapper:
    """Minimal stand-in for a requests.Response carrying a canned payload."""

    def __init__(self, data):
        # Payload handed back verbatim by json().
        self.data = data

    def json(self):
        """Return the wrapped payload, mimicking Response.json()."""
        return self.data

    def raise_for_status(self):
        """No-op: canned responses never represent an HTTP error."""
        pass
def requests_session_get(self, url, **kwargs):
    """Serve canned NBN API data wrapped like a requests response.

    Monkeypatched over requests.Session.get so no network traffic occurs;
    unrecognised URLs raise NotImplementedError.
    """
    autocomplete_prefix = "https://places.nbnco.net.au/places/v1/autocomplete?query="
    details_prefix = "https://places.nbnco.net.au/places/v2/details/"
    if url.startswith(autocomplete_prefix):
        key = url.split("=")[-1]
        return JSONWrapper(testutils.read_test_data_json(f"nbn/query.{key}.json"))
    if url.startswith(details_prefix):
        key = url.split("/")[-1]
        return JSONWrapper(testutils.read_test_data_json(f"nbn/details.{key}.json"))
    raise NotImplementedError
def test_get_address(monkeypatch):
    """NBNApi detail and loc_id lookups work, including the in-memory cache."""
    monkeypatch.setattr("nbn.requests.Session.get", requests_session_get)
    nbn = NBNApi()
    # test uncached
    CACHE.clear()
    details = nbn.get_nbn_loc_details("LOC000126303452")
    assert details["servingArea"]["techType"] == "FTTN"
    assert details["servingArea"]["description"] == "Moggill"
    assert details["addressDetail"]["formattedAddress"] == "LOT 56 1 BLUEGUM RISE ANSTEAD QLD 4070 Australia"
    assert details["addressDetail"]["techChangeStatus"] == "Committed"
    assert details["addressDetail"]["programType"] == "On-Demand N2P SDU/MDU Simple"
    assert details["addressDetail"]["targetEligibilityQuarter"] == "Jun 2024"
    # test cached
    details = nbn.get_nbn_loc_details("LOC000126303452")
    assert details["servingArea"]["techType"] == "FTTN"
    # fetch, and cache-hit
    loc_id = nbn.get_nbn_loc_id("X1", "1 BLUEGUM RISE ANSTEAD 4070")
    assert loc_id == "LOC000126303452"
    loc_id = nbn.get_nbn_loc_id("X1", "XXX")  # cached
    assert loc_id == "LOC000126303452"
    # not found
    loc_id = nbn.get_nbn_loc_id("X2", "1 XYZ ROAD ABCABC 4070")
    assert loc_id is None
    nbn.close()
def test_mixed_google_and_loc_ids(monkeypatch):
    """Test that the API correctly filters Google Place IDs and returns LOC IDs."""
    monkeypatch.setattr("nbn.requests.Session.get", requests_session_get)
    nbn = NBNApi()
    CACHE.clear()
    # Test mixed results where Google Place ID comes first
    # Should return the LOC ID, not the Google Place ID
    loc_id = nbn.get_nbn_loc_id("X3", "TEST MIXED RESULTS")
    assert loc_id == "LOC000999999999", f"Expected LOC ID but got: {loc_id}"
    nbn.close()
| python | MIT | 9bb664084224c03ec53dab555c80adde4d491466 | 2026-01-05T06:50:31.043193Z | false |
LukePrior/nbn-upgrade-map | https://github.com/LukePrior/nbn-upgrade-map/blob/9bb664084224c03ec53dab555c80adde4d491466/tests/test_export_locids_csv.py | tests/test_export_locids_csv.py | import csv
import os
import tempfile
import export_locids_csv
def test_iter_feature_data_with_default_fields(monkeypatch):
    """Test iterating features with default fields."""
    mock_geojson = {
        "features": [
            {
                "geometry": {"coordinates": [115.78506081, -31.95270506]},
                "properties": {
                    "name": "100 STEPHENSON AVENUE MOUNT CLAREMONT 6010",
                    "locID": "LOC000190342926",
                    "tech": "FTTN",
                    "upgrade": "FTTP_NA",
                    "gnaf_pid": "GAWA_146611183",
                    "tech_change_status": "In Design",
                    "program_type": "Customer-initiated N2P MDU Complex",
                    "target_eligibility_quarter": "Mar 2028",
                },
            },
            {
                "geometry": {"coordinates": [115.77758570, -31.96588092]},
                "properties": {
                    "name": "27 LISLE STREET MOUNT CLAREMONT 6010",
                    "locID": "LOC000027955790",
                    "tech": "HFC",
                    "upgrade": "NULL_NA",
                    "gnaf_pid": "GAWA_146615762",
                },
            },
        ]
    }

    def mock_get_all_geojson_files(show_progress=True, rewrite_geojson=False):
        # Single fake file containing the two features above.
        yield "test.geojson", mock_geojson

    monkeypatch.setattr("export_locids_csv.get_all_geojson_files", mock_get_all_geojson_files)
    # Test with default fields
    results = list(export_locids_csv.iter_feature_data(show_progress=False))
    assert len(results) == 2
    # GeoJSON coordinates are [lng, lat]; the iterator splits them out.
    assert results[0]["loc_id"] == "LOC000190342926"
    assert results[0]["latitude"] == -31.95270506
    assert results[0]["longitude"] == 115.78506081
    assert results[0]["name"] == "100 STEPHENSON AVENUE MOUNT CLAREMONT 6010"
    assert results[0]["tech"] == "FTTN"
    assert results[0]["upgrade"] == "FTTP_NA"
    assert results[0]["gnaf_pid"] == "GAWA_146611183"
    assert results[0]["tech_change_status"] == "In Design"
    assert results[0]["program_type"] == "Customer-initiated N2P MDU Complex"
    assert results[0]["target_eligibility_quarter"] == "Mar 2028"
def test_iter_feature_data_with_specific_fields(monkeypatch):
    """Test iterating features with specific fields."""
    mock_geojson = {
        "features": [
            {
                "geometry": {"coordinates": [115.78506081, -31.95270506]},
                "properties": {
                    "name": "100 STEPHENSON AVENUE MOUNT CLAREMONT 6010",
                    "locID": "LOC000190342926",
                    "tech": "FTTN",
                    "upgrade": "FTTP_NA",
                    "gnaf_pid": "GAWA_146611183",
                },
            },
        ]
    }

    def mock_get_all_geojson_files(show_progress=True, rewrite_geojson=False):
        yield "test.geojson", mock_geojson

    monkeypatch.setattr("export_locids_csv.get_all_geojson_files", mock_get_all_geojson_files)
    # Test with specific fields
    results = list(export_locids_csv.iter_feature_data(show_progress=False, fields=["loc_id", "name", "tech"]))
    assert len(results) == 1
    # Only the requested keys should appear in each row.
    assert "loc_id" in results[0]
    assert "name" in results[0]
    assert "tech" in results[0]
    assert results[0]["loc_id"] == "LOC000190342926"
    assert results[0]["name"] == "100 STEPHENSON AVENUE MOUNT CLAREMONT 6010"
    assert results[0]["tech"] == "FTTN"
def test_iter_feature_data_skip_missing_locid(monkeypatch):
    """Test that features without locID are skipped."""
    mock_geojson = {
        "features": [
            {
                "geometry": {"coordinates": [115.78506081, -31.95270506]},
                "properties": {
                    "name": "Missing LocID",
                    "tech": "FTTN",
                },
            },
            {
                "geometry": {"coordinates": [115.77758570, -31.96588092]},
                "properties": {
                    "name": "Has LocID",
                    "locID": "LOC000027955790",
                    "tech": "HFC",
                },
            },
        ]
    }

    def mock_get_all_geojson_files(show_progress=True, rewrite_geojson=False):
        yield "test.geojson", mock_geojson

    monkeypatch.setattr("export_locids_csv.get_all_geojson_files", mock_get_all_geojson_files)
    results = list(export_locids_csv.iter_feature_data(show_progress=False, fields=["loc_id", "name"]))
    # Only the feature with a locID survives.
    assert len(results) == 1
    assert results[0]["loc_id"] == "LOC000027955790"
def test_iter_feature_data_skip_missing_coordinates(monkeypatch):
    """Test that features without valid coordinates are skipped."""
    mock_geojson = {
        "features": [
            {
                "geometry": {"coordinates": None},
                "properties": {
                    "locID": "LOC000001",
                    "name": "Missing coords",
                },
            },
            {
                "geometry": {"coordinates": [None, None]},
                "properties": {
                    "locID": "LOC000002",
                    "name": "Null coords",
                },
            },
            {
                "geometry": {"coordinates": [115.77758570, -31.96588092]},
                "properties": {
                    "locID": "LOC000003",
                    "name": "Valid coords",
                },
            },
        ]
    }

    def mock_get_all_geojson_files(show_progress=True, rewrite_geojson=False):
        yield "test.geojson", mock_geojson

    monkeypatch.setattr("export_locids_csv.get_all_geojson_files", mock_get_all_geojson_files)
    results = list(export_locids_csv.iter_feature_data(show_progress=False, fields=["loc_id", "name"]))
    # Only the feature with a real [lng, lat] pair survives.
    assert len(results) == 1
    assert results[0]["loc_id"] == "LOC000003"
def test_write_csv_with_default_fields():
    """Test writing CSV with default fields."""
    rows = [
        {"loc_id": "LOC001", "latitude": -31.95270506, "longitude": 115.78506081},
        {"loc_id": "LOC002", "latitude": -31.96588092, "longitude": 115.77758570},
    ]
    with tempfile.TemporaryDirectory() as tmpdir:
        output_path = os.path.join(tmpdir, "test.csv")
        count = export_locids_csv.write_csv(output_path, rows, ["loc_id", "latitude", "longitude"], dedupe=True)
        assert count == 2
        assert os.path.exists(output_path)
        # Round-trip the file to confirm the header and values were written.
        with open(output_path, "r", encoding="utf-8") as f:
            reader = csv.DictReader(f)
            rows_read = list(reader)
            assert len(rows_read) == 2
            assert rows_read[0]["loc_id"] == "LOC001"
            assert rows_read[0]["latitude"] == "-31.95270506"
            assert rows_read[0]["longitude"] == "115.78506081"
def test_write_csv_with_all_fields():
    """Test writing CSV with all fields."""
    rows = [
        {
            "loc_id": "LOC001",
            "latitude": -31.95270506,
            "longitude": 115.78506081,
            "name": "100 STEPHENSON AVENUE MOUNT CLAREMONT 6010",
            "tech": "FTTN",
            "upgrade": "FTTP_NA",
            "gnaf_pid": "GAWA_146611183",
            "tech_change_status": "In Design",
            "program_type": "Customer-initiated N2P MDU Complex",
            "target_eligibility_quarter": "Mar 2028",
        },
    ]
    with tempfile.TemporaryDirectory() as tmpdir:
        output_path = os.path.join(tmpdir, "test.csv")
        # Export every column defined in AVAILABLE_FIELDS.
        count = export_locids_csv.write_csv(output_path, rows, export_locids_csv.AVAILABLE_FIELDS, dedupe=True)
        assert count == 1
        assert os.path.exists(output_path)
        with open(output_path, "r", encoding="utf-8") as f:
            reader = csv.DictReader(f)
            rows_read = list(reader)
            assert len(rows_read) == 1
            assert rows_read[0]["loc_id"] == "LOC001"
            assert rows_read[0]["name"] == "100 STEPHENSON AVENUE MOUNT CLAREMONT 6010"
            assert rows_read[0]["tech"] == "FTTN"
            assert rows_read[0]["tech_change_status"] == "In Design"
def test_write_csv_dedupe():
    """Test CSV deduplication by loc_id."""
    rows = [
        {"loc_id": "LOC001", "latitude": -31.95270506, "longitude": 115.78506081},
        {"loc_id": "LOC001", "latitude": -31.95270506, "longitude": 115.78506081},  # duplicate
        {"loc_id": "LOC002", "latitude": -31.96588092, "longitude": 115.77758570},
    ]
    with tempfile.TemporaryDirectory() as tmpdir:
        output_path = os.path.join(tmpdir, "test.csv")
        # With deduplication
        count = export_locids_csv.write_csv(output_path, rows, ["loc_id", "latitude", "longitude"], dedupe=True)
        assert count == 2
        # Without deduplication
        count = export_locids_csv.write_csv(output_path, rows, ["loc_id", "latitude", "longitude"], dedupe=False)
        assert count == 3
def test_write_csv_custom_fields():
    """Test writing CSV with custom field selection."""
    rows = [
        {
            "loc_id": "LOC001",
            "name": "Address 1",
            "tech": "FTTN",
            "upgrade": "FTTP_NA",
            "latitude": -31.95270506,
            "longitude": 115.78506081,
        },
    ]
    with tempfile.TemporaryDirectory() as tmpdir:
        output_path = os.path.join(tmpdir, "test.csv")
        count = export_locids_csv.write_csv(output_path, rows, ["loc_id", "name", "tech"], dedupe=True)
        assert count == 1
        with open(output_path, "r", encoding="utf-8") as f:
            reader = csv.DictReader(f)
            rows_read = list(reader)
            assert len(rows_read) == 1
            # Only the three requested columns should be written.
            assert "loc_id" in rows_read[0]
            assert "name" in rows_read[0]
            assert "tech" in rows_read[0]
            assert "latitude" not in rows_read[0]
            assert "longitude" not in rows_read[0]
def test_available_fields_constant():
    """AVAILABLE_FIELDS must list every exportable column, in order."""
    assert export_locids_csv.AVAILABLE_FIELDS == [
        "loc_id",
        "latitude",
        "longitude",
        "name",
        "tech",
        "upgrade",
        "gnaf_pid",
        "tech_change_status",
        "program_type",
        "target_eligibility_quarter",
    ]
def test_default_fields_constant():
    """DEFAULT_FIELDS must stay loc_id/latitude/longitude for backward compatibility."""
    expected = ["loc_id", "latitude", "longitude"]
    assert export_locids_csv.DEFAULT_FIELDS == expected
| python | MIT | 9bb664084224c03ec53dab555c80adde4d491466 | 2026-01-05T06:50:31.043193Z | false |
LukePrior/nbn-upgrade-map | https://github.com/LukePrior/nbn-upgrade-map/blob/9bb664084224c03ec53dab555c80adde4d491466/code/sitemap.py | code/sitemap.py | import xml.etree.ElementTree as ET
from urllib.parse import quote
import utils
def generate_sitemap(json_file, output_file):
    """Build an XML sitemap from the combined-suburbs JSON and write it to disk.

    One <url> per suburb (with its processed date as <lastmod>), plus a
    static entry for the stats page.
    """
    suburb_data = utils.read_json_file(json_file)
    root = ET.Element("urlset", xmlns="http://www.sitemaps.org/schemas/sitemap/0.9")
    # The stats page has no per-suburb modification date.
    add_url(root, "https://nbn.lukeprior.com/stats")
    for state, state_suburbs in suburb_data.items():
        state_param = state.lower()
        for entry in state_suburbs:
            # URL-encode the suburb slug (lower-case, hyphen-separated).
            slug = quote(entry["name"].lower().replace(" ", "-"))
            lastmod = entry["processed_date"].split("T")[0]
            add_url(root, f"https://nbn.lukeprior.com/?suburb={slug}&state={state_param}", lastmod)
    xml_text = ET.tostring(root, encoding="utf-8", xml_declaration=True).decode("utf-8")
    with open(output_file, "w") as out:
        out.write(xml_text)
def add_url(urlset, loc, lastmod=None):
    """Append a <url> entry (with optional <lastmod>) under the sitemap root."""
    entry = ET.SubElement(urlset, "url")
    ET.SubElement(entry, "loc").text = loc
    if lastmod:
        ET.SubElement(entry, "lastmod").text = lastmod
# Script entry: regenerate the site sitemap from the combined suburb results.
generate_sitemap("results/combined-suburbs.json", "site/sitemap.xml")
| python | MIT | 9bb664084224c03ec53dab555c80adde4d491466 | 2026-01-05T06:50:31.043193Z | false |
LukePrior/nbn-upgrade-map | https://github.com/LukePrior/nbn-upgrade-map/blob/9bb664084224c03ec53dab555c80adde4d491466/code/geojson.py | code/geojson.py | import json
import logging
import os
from datetime import datetime
from data import Address, AddressList
from utils import read_json_file, write_json_file
def format_addresses(addresses: AddressList, suburb: str, generated: datetime = None) -> dict:
    """Convert the list of addresses (with upgrade+tech fields) into a GeoJSON FeatureCollection."""

    def _to_feature(addr: Address) -> dict:
        """Build a GeoJSON Feature dict for a single address."""
        properties = {
            "name": addr.name,
            "locID": addr.loc_id,
            "tech": addr.tech,
            "upgrade": addr.upgrade,
            "gnaf_pid": addr.gnaf_pid,
        }
        # Optional upgrade-program details are only emitted when present.
        for key, value in (
            ("tech_change_status", addr.tech_change_status),
            ("program_type", addr.program_type),
            ("target_eligibility_quarter", addr.target_eligibility_quarter),
        ):
            if value:
                properties[key] = value
        return {
            "type": "Feature",
            "geometry": {"type": "Point", "coordinates": [addr.longitude, addr.latitude]},
            "properties": properties,
        }

    # Addresses lacking either tech or upgrade carry no useful data and are dropped.
    features = [_to_feature(addr) for addr in addresses if addr.upgrade and addr.tech]
    timestamp = generated if generated is not None else datetime.now()
    return {
        "type": "FeatureCollection",
        "generated": timestamp.isoformat(),
        "suburb": suburb,
        "features": sorted(features, key=lambda feat: feat["properties"]["gnaf_pid"]),
    }
def get_geojson_filename(suburb: str, state: str) -> str:
    """Return the results-relative path of a suburb's GeoJSON file."""
    state_dir = state.upper()
    suburb_slug = suburb.lower().replace(" ", "-")
    return f"results/{state_dir}/{suburb_slug}.geojson"
def write_geojson_file(suburb: str, state: str, addresses: AddressList, generated: datetime = None):
    """Serialise *addresses* as a GeoJSON FeatureCollection and save it.

    Creates the state directory on demand; *generated* defaults to "now"
    inside format_addresses.
    """
    target = get_geojson_filename(suburb, state)
    os.makedirs(os.path.dirname(target), exist_ok=True)
    logging.info("Writing results to %s", target)
    # indent=1 keeps the file human-diffable while minimising the size increase.
    write_json_file(target, format_addresses(addresses, suburb, generated), indent=1)
def read_geojson_file(suburb: str, state: str) -> dict:
    """Load a suburb's GeoJSON FeatureCollection, or return None when absent."""
    path = get_geojson_filename(suburb, state)
    if not os.path.exists(path):
        return None
    return read_json_file(path)
def feature_to_address(f: dict) -> Address:
    """Rebuild an Address from a GeoJSON Feature.

    Required properties raise KeyError if absent; the upgrade-program
    fields fall back to None.
    """
    props = f["properties"]
    coords = f["geometry"]["coordinates"]  # GeoJSON order: [lng, lat]
    return Address(
        name=props["name"],
        gnaf_pid=props.get("gnaf_pid"),
        longitude=coords[0],
        latitude=coords[1],
        loc_id=props["locID"],
        tech=props["tech"],
        upgrade=props["upgrade"],
        tech_change_status=props.get("tech_change_status"),
        program_type=props.get("program_type"),
        target_eligibility_quarter=props.get("target_eligibility_quarter"),
    )
def read_geojson_file_addresses(suburb: str, state: str) -> (AddressList, datetime):
    """Return the Addresses and generated timestamp from a suburb's GeoJSON file."""
    collection = read_geojson_file(suburb, state)
    addresses = [feature_to_address(feature) for feature in collection["features"]]
    return addresses, datetime.fromisoformat(collection["generated"])
def get_geojson_file_generated_from_name(suburb: str, state: str) -> datetime:
    """Resolve a suburb's GeoJSON filename and return its generated date."""
    filename = get_geojson_filename(suburb, state)
    return get_geojson_file_generated(filename)
def get_geojson_file_generated(filename) -> datetime:
    """Get the generated date from the GeoJSON file (faster than reading whole file).

    Returns None implicitly when the file does not exist. The fast path
    assumes "generated" appears within the first three lines of the file
    (the layout write_geojson_file produces); anything else falls back to
    parsing the whole file.
    """
    if os.path.exists(filename):
        # attempt to load just the first few lines of the file
        try:
            with open(filename, encoding="utf-8") as file:
                # Three lines usually cover {"type", "generated", ...}; stripping the
                # trailing comma and appending "}" makes the fragment valid JSON.
                first_bit = file.readline() + file.readline() + file.readline().replace(",", "") + "}"
            result = json.loads(first_bit)
            if "generated" not in result:
                # Fragment parsed but did not contain the key: do a full read.
                result = read_json_file(filename)
        except json.JSONDecodeError:
            # sometimes generated is not at the top of the file, fall back to loading the entire thing
            result = read_json_file(filename)
        return datetime.fromisoformat(result["generated"])
| python | MIT | 9bb664084224c03ec53dab555c80adde4d491466 | 2026-01-05T06:50:31.043193Z | false |
LukePrior/nbn-upgrade-map | https://github.com/LukePrior/nbn-upgrade-map/blob/9bb664084224c03ec53dab555c80adde4d491466/code/export_locids_csv.py | code/export_locids_csv.py | import argparse
import csv
import os
from typing import Any, Dict, Iterable, List
from utils import get_all_geojson_files
# All available fields in GeoJSON features
AVAILABLE_FIELDS = [
"loc_id",
"latitude",
"longitude",
"name",
"tech",
"upgrade",
"gnaf_pid",
"tech_change_status",
"program_type",
"target_eligibility_quarter",
]
# Default fields to export (maintains backward compatibility)
DEFAULT_FIELDS = ["loc_id", "latitude", "longitude"]
def iter_feature_data(show_progress: bool = True, fields: List[str] = None) -> Iterable[Dict[str, Any]]:
    """Yield one dict per GeoJSON feature across all result files.

    Args:
        show_progress: Whether to show progress while scanning files
        fields: List of field names to extract. If None, extracts all available fields.

    Features missing a locID or coordinates are skipped.
    """
    if fields is None:
        fields = AVAILABLE_FIELDS
    for _filename, geojson_data in get_all_geojson_files(show_progress=show_progress, rewrite_geojson=False):
        for feature in geojson_data.get("features", []):
            props = feature.get("properties", {})
            loc_id = props.get("locID")
            geometry = feature.get("geometry") or {}
            coords = geometry.get("coordinates") or [None, None]
            # Skip features without a loc_id or a usable coordinate pair.
            if not loc_id:
                continue
            if coords is None or len(coords) < 2 or coords[0] is None or coords[1] is None:
                continue
            lng, lat = coords[0], coords[1]
            # All extractable values; the requested subset is selected below.
            values = {
                "loc_id": loc_id,
                "latitude": float(lat),
                "longitude": float(lng),
                "name": props.get("name", ""),
                "tech": props.get("tech", ""),
                "upgrade": props.get("upgrade", ""),
                "gnaf_pid": props.get("gnaf_pid", ""),
                "tech_change_status": props.get("tech_change_status", ""),
                "program_type": props.get("program_type", ""),
                "target_eligibility_quarter": props.get("target_eligibility_quarter", ""),
            }
            yield {field: values[field] for field in fields}
def write_csv(output_path: str, rows: Iterable[Dict[str, Any]], fields: List[str], dedupe: bool = True) -> int:
    """Write rows to CSV with the specified columns; return the number of rows written.

    Args:
        output_path: Path to write the CSV file
        rows: Iterable of dictionaries containing feature data
        fields: List of field names to include in the CSV (column order)
        dedupe: Whether to deduplicate by loc_id (first occurrence wins)
    """
    # BUGFIX: os.makedirs("") raises FileNotFoundError, so only create a
    # directory when output_path actually has a directory component.
    parent_dir = os.path.dirname(output_path)
    if parent_dir:
        os.makedirs(parent_dir, exist_ok=True)
    seen = set()
    count = 0
    with open(output_path, "w", newline="", encoding="utf-8") as f:
        writer = csv.DictWriter(f, fieldnames=fields)
        writer.writeheader()
        for row in rows:
            if dedupe and "loc_id" in row:
                if row["loc_id"] in seen:
                    continue
                seen.add(row["loc_id"])
            # Coordinates are written with fixed 8-decimal precision; other
            # fields (and empty coordinate values) pass through unchanged.
            formatted_row = {}
            for field in fields:
                value = row.get(field, "")
                if field in ["latitude", "longitude"] and value != "":
                    formatted_row[field] = f"{value:.8f}"
                else:
                    formatted_row[field] = value
            writer.writerow(formatted_row)
            count += 1
    return count
def main():
    """Parse CLI arguments, then export the selected fields to a CSV file."""
    arg_parser = argparse.ArgumentParser(
        description="Export NBN data with selected fields to a CSV from GeoJSON results"
    )
    arg_parser.add_argument(
        "--output",
        default=os.path.join("results", "locids.csv"),
        help="Path to write the CSV (default: results/locids.csv)",
    )
    arg_parser.add_argument(
        "--no-dedupe",
        action="store_true",
        help="Do not deduplicate by loc_id (default is to dedupe)",
    )
    arg_parser.add_argument(
        "--no-progress",
        action="store_true",
        help="Disable progress output while scanning files",
    )
    arg_parser.add_argument(
        "--fields",
        nargs="+",
        choices=AVAILABLE_FIELDS,
        default=DEFAULT_FIELDS,
        help=(
            f"Fields to include in the CSV (default: {' '.join(DEFAULT_FIELDS)}). "
            f"Available: {', '.join(AVAILABLE_FIELDS)}"
        ),
    )
    arg_parser.add_argument(
        "--all-fields",
        action="store_true",
        help="Include all available fields in the CSV",
    )
    args = arg_parser.parse_args()

    # --all-fields overrides any explicit --fields selection.
    selected_fields = AVAILABLE_FIELDS if args.all_fields else args.fields
    feature_rows = iter_feature_data(show_progress=not args.no_progress, fields=selected_fields)
    written = write_csv(args.output, feature_rows, selected_fields, dedupe=not args.no_dedupe)
    print(f"Wrote {written} rows with fields [{', '.join(selected_fields)}] to {args.output}")


if __name__ == "__main__":
    main()
| python | MIT | 9bb664084224c03ec53dab555c80adde4d491466 | 2026-01-05T06:50:31.043193Z | false |
LukePrior/nbn-upgrade-map | https://github.com/LukePrior/nbn-upgrade-map/blob/9bb664084224c03ec53dab555c80adde4d491466/code/db.py | code/db.py | import logging
import sqlite3
from abc import ABC, abstractmethod
from argparse import ArgumentParser, Namespace
import data
import psycopg2
from psycopg2.extras import NamedTupleCursor
SQLITE_FILE_EXTENSIONS = {"db", "sqlite", "sqlite3", "db3", "s3db", "sl3"}
class DbDriver(ABC):
    """Abstract base class for database connections."""

    @abstractmethod
    def execute(self, query, args=None):
        """Return a list of Namespace objects for the provided query."""
class AddressDB:
    """Query our cut-down version of the GNAF Postgres database for addresses."""

    def __init__(self, db: DbDriver):
        self.db = db

    def get_addresses(self, target_suburb: str, target_state: str) -> data.AddressList:
        """Return a list of Address for the provided suburb+state from the database."""
        query = """
        SELECT gnaf_pid, address, postcode, latitude, longitude
        FROM address_principals
        WHERE locality_name = %s AND state = %s
        LIMIT 100000"""
        rows = self.db.execute(query, (target_suburb, target_state))
        return [
            data.Address(
                name=f"{row.address} {target_suburb} {row.postcode}",
                gnaf_pid=row.gnaf_pid,
                longitude=float(row.longitude),
                latitude=float(row.latitude),
            )
            for row in rows
        ]

    def get_counts_by_suburb(self) -> dict[str, dict[str, int]]:
        """Return a tally of addresses keyed by state, then suburb."""
        query = """
        SELECT locality_name, state, COUNT(*) as count
        FROM address_principals
        GROUP BY locality_name, state
        ORDER BY state, locality_name
        """
        results = {}
        for record in self.db.execute(query):
            results.setdefault(record.state, {})[record.locality_name] = record.count
        return results

    def get_extents_by_suburb(self) -> dict:
        """Return each state/suburb's bounding box as ((min_lat, min_long), (max_lat, max_long))."""
        query = """
        SELECT locality_name, state,
        min(latitude) as min_lat,
        max(latitude) as max_lat,
        min(longitude) as min_long,
        max(longitude) as max_long
        FROM address_principals
        GROUP BY locality_name, state
        ORDER BY state, locality_name
        """
        results = {}
        for record in self.db.execute(query):
            results.setdefault(record.state, {})[record.locality_name] = (
                (float(record.min_lat), float(record.min_long)),
                (float(record.max_lat), float(record.max_long)),
            )
        return results
def add_db_arguments(parser: ArgumentParser):
    """Register the common database-connection options on the given parser."""
    parser.add_argument("-u", "--dbuser", help="The name of the database user", default="postgres")
    parser.add_argument("-p", "--dbpassword", help="The password for the database user", default="password")
    parser.add_argument(
        "-H", "--dbhost", help="The hostname for the database (or file-path for Sqlite)", default="localhost"
    )
    parser.add_argument("-P", "--dbport", help="The port number for the database", default="5433")
    # store_false: passing -i *disables* index creation; create_index defaults to True.
    parser.add_argument(
        "-i",
        "--create_index",
        help="Whether to disable adding an index to the DB to help speed up queries (only used for GitHub Actions)",
        action="store_false",
    )
class PostgresDb(DbDriver):
    """Class that implements Postgresql DB connection."""

    def __init__(self, database: str, host: str, port: str, user: str, password: str, create_index: bool = True):
        """Connect to the database, select the GNAF schema, and optionally create a lookup index."""
        conn = psycopg2.connect(
            database=database, host=host, port=port, user=user, password=password, cursor_factory=NamedTupleCursor
        )
        self.cur = conn.cursor()
        # detect the schema used by the DB -- the GNAF schema name varies by
        # data release, so it is discovered dynamically rather than hard-coded
        self.cur.execute("SELECT schema_name FROM information_schema.schemata where schema_name like 'gnaf_%'")
        db_schema = self.cur.fetchone().schema_name
        self.cur.execute(f"SET search_path TO {db_schema}")
        conn.commit()
        # optionally create a DB index (speeds up the per-suburb address queries)
        if create_index:
            logging.info("Creating DB index...")
            self.cur.execute(
                "CREATE INDEX IF NOT EXISTS address_name_state ON address_principals (locality_name, state)"
            )
            conn.commit()

    def execute(self, query, args=None):
        """Return a list of Namespace objects for the provided query."""
        # NamedTupleCursor makes each row's columns available as attributes.
        self.cur.execute(query, args)
        return self.cur.fetchall()
class SqliteDb(DbDriver):
    """DbDriver implementation for a Sqlite database file. Pass the filename as the dbhost."""

    def __init__(self, database_file: str):
        """Open the Sqlite file and keep a cursor producing sqlite3.Row objects."""
        conn = sqlite3.connect(database_file)
        conn.row_factory = sqlite3.Row
        self.cur = conn.cursor()

    def execute(self, query, args=None):
        """Return a list of Namespace objects for the provided query."""
        # Translate Postgres-style %s placeholders into sqlite's ?.
        query = query.replace("%s", "?")
        logging.info("Executing query: %s", query)
        self.cur.execute(query, args if args is not None else {})
        # sqlite doesn't support NamedTupleCursor, so expose columns as attributes via Namespace.
        return [Namespace(**dict(zip(row.keys(), row))) for row in self.cur.fetchall()]
def connect_to_db(args: Namespace) -> AddressDB:
    """Build an AddressDB from parsed CLI args, choosing Sqlite or Postgres by the dbhost value."""
    extension = args.dbhost.rsplit(".", 1)[-1]
    if extension in SQLITE_FILE_EXTENSIONS:
        # A dbhost that looks like a Sqlite filename opens a local file.
        driver = SqliteDb(args.dbhost)
    else:
        driver = PostgresDb(
            "postgres",
            args.dbhost,
            args.dbport,
            args.dbuser,
            args.dbpassword,
            args.create_index,
        )
    return AddressDB(driver)
| python | MIT | 9bb664084224c03ec53dab555c80adde4d491466 | 2026-01-05T06:50:31.043193Z | false |
LukePrior/nbn-upgrade-map | https://github.com/LukePrior/nbn-upgrade-map/blob/9bb664084224c03ec53dab555c80adde4d491466/code/update_breakdown.py | code/update_breakdown.py | #!/usr/bin/env python3
"""a cut-down version of update_historical_tech_and_upgrade_breakdown() that processes the current checkout"""
import logging
from adhoc_tools import generate_state_breakdown, update_breakdown
from tabulate import tabulate
def print_breakdowns(breakdowns):
    """Dump the tech and upgrade breakdowns to the console as github-style tables.

    breakdowns maps run-date -> {"tech": {...}, "upgrade": {...}}.
    """
    # A tuple, not a set: set iteration order of strings varies with hash
    # randomisation, so the two tables would print in an unpredictable order.
    for key in ("tech", "upgrade"):
        rows = [{"date": run_date} | breakdowns[run_date][key] for run_date in sorted(breakdowns)]
        print()
        print(tabulate(rows, headers="keys", tablefmt="github"))
if __name__ == "__main__":  # pragma: no cover
    logging.basicConfig(level=logging.INFO)
    # Recompute today's breakdown entry (if not already present), print the
    # full history, then regenerate the per-state breakdown JSON/CSV files.
    bd = update_breakdown()
    print_breakdowns(bd)
    generate_state_breakdown()
| python | MIT | 9bb664084224c03ec53dab555c80adde4d491466 | 2026-01-05T06:50:31.043193Z | false |
LukePrior/nbn-upgrade-map | https://github.com/LukePrior/nbn-upgrade-map/blob/9bb664084224c03ec53dab555c80adde4d491466/code/adhoc_tools.py | code/adhoc_tools.py | #!/usr/bin/env python3
import argparse
import csv
import glob
import logging
import os
import pprint
import re
import subprocess
from collections import Counter, OrderedDict
from datetime import datetime, timedelta
import data
import db
import geojson
import main
import requests
import suburbs
import utils
from bs4 import BeautifulSoup
from tabulate import tabulate
from utils import get_all_features
# NBN media statement listing suburbs/towns and the date their fibre upgrades
# begin; scraped by get_nbn_suburb_dates().
NBN_UPGRADE_DATES_URL = (
    "https://www.nbnco.com.au/corporate-information/media-centre/media-statements/nbnco-announces-suburbs-and"
    "-towns-where-an-additional-ninty-thousand-homes-and-businesses-will-become-eligible-for-fibre-upgrades"
)
def get_nbn_suburb_dates():
    """Parse an NBN web page to get a mapping of state -> {suburb: upgrade date string}."""
    logging.info("Fetching list of suburbs from NBN website...")
    # A timeout prevents the scrape from hanging indefinitely on a stalled connection.
    content = requests.get(NBN_UPGRADE_DATES_URL, timeout=60).content
    results = {}
    soup = BeautifulSoup(content, "html.parser")
    # Page structure: one accordion item per state, containing <p> blocks of
    # lines shaped like "Suburb Name - from March 2024".
    for state_element in soup.find_all("div", class_="cmp-accordion__item"):
        state = state_element.find("span", class_="cmp-accordion__title").text
        results[state] = {}
        for p in state_element.find("div", class_="cmp-text").find_all("p"):
            for suburb, date in re.findall(r"^(.*) - from (\w+ \d{4})", p.text, flags=re.MULTILINE):
                results[state][suburb.title()] = date
    return results
def get_db_suburb_list(args):
    """Return state -> sorted list of title-cased suburb names, from the database."""
    address_db = db.connect_to_db(args)
    counts_by_state = address_db.get_counts_by_suburb()
    result = {}
    for state, suburb_counts in counts_by_state.items():
        result[state] = [name.title() for name in sorted(suburb_counts)]
    return result
def add_address_count_to_suburbs(args):
    """Annotate every Suburb object with its DB address count, then save the suburb list."""
    address_db = db.connect_to_db(args)
    db_counts = address_db.get_counts_by_suburb()
    all_suburbs = suburbs.read_all_suburbs()
    for state, suburb_list in all_suburbs.items():
        state_counts = db_counts.get(state, {})
        for suburb in suburb_list:
            # DB locality names are upper-case; missing suburbs count as 0.
            suburb.address_count = state_counts.get(suburb.name.upper(), 0)
    suburbs.write_all_suburbs(all_suburbs)
def rebuild_status_file():
    """Rebuild the suburb status file: DB suburb list + processed dates + address counts.

    NOTE(review): relies on the module-level `args` parsed in __main__.
    """
    db_suburbs = get_db_suburb_list(args)
    db_suburbs["QLD"].append("Barwidgi")  # hack for empty suburb
    # TODO: Townsville not in DB. Why? Two similar names included
    all_suburbs = {}  # state -> List[Suburb]
    for state, suburb_names in db_suburbs.items():
        state_suburbs = []
        for name in set(suburb_names):  # set() for faster membership semantics, as before
            processed_date = geojson.get_geojson_file_generated_from_name(name, state)
            state_suburbs.append(data.Suburb(name=name, processed_date=processed_date))
        all_suburbs[state] = state_suburbs
    suburbs.write_all_suburbs(all_suburbs)
    add_address_count_to_suburbs(args)
def resort_results():
    """Re-sort the features of every previously created geojson file by gnaf_pid."""
    for state in data.STATES:
        for filename in glob.glob(f"results/{state}/*.geojson"):
            print(filename)
            contents = utils.read_json_file(filename)
            contents["features"].sort(key=lambda feature: feature["properties"]["gnaf_pid"])
            utils.write_json_file(filename, contents, indent=1)
def get_suburb_extents():
    """Write each suburb's bounding box (from address min/max lat/long) to a JSON file."""
    address_db = db.connect_to_db(args)  # uses module-level `args` from __main__
    logging.info("Getting extents")
    extents = address_db.get_extents_by_suburb()
    logging.info("Writing extents")
    utils.write_json_file("results/suburb-extents.json", extents, indent=1)
def update_all_suburbs_from_db():
    """Rewrite the (old) all_suburbs.json file from the DB. This is a one-off."""
    db_suburbs = get_db_suburb_list(args)
    db_suburbs["QLD"].append("Barwidgi")  # hack for empty suburb
    db_suburbs["QLD"].sort()
    states = {state: [name.upper() for name in names] for state, names in db_suburbs.items()}
    utils.write_json_file("results/all_suburbs.json", {"states": states})
def check_processing_rate():
    """Print (and return) a table of the number of suburbs processed each day."""
    tally = Counter()
    for state, suburb_list in suburbs.read_all_suburbs().items():
        for suburb in suburb_list:
            if suburb.processed_date:
                tally[suburb.processed_date.date()] += 1
            else:
                print(f"No processed date for {suburb.name}, {state}")
    items = sorted(tally.items())
    items.append(("TOTAL", sum(tally.values())))
    print(tabulate(items, headers=["date", "count"], tablefmt="github"))
    return items
def remove_duplicate_addresses():
    """For every suburb, strip duplicate addresses and rewrite its geojson file when changed."""
    for state, suburb_list in suburbs.read_all_suburbs().items():
        for suburb in suburb_list:
            addresses, generated = geojson.read_geojson_file_addresses(suburb.name, state)
            deduped = main.remove_duplicate_addresses(addresses)
            if len(deduped) != len(addresses):
                geojson.write_geojson_file(suburb.name.upper(), state, deduped, generated)
    # No need to update progress, combined-suburbs: they are based on DB counts
def fix_gnaf_pid_mismatch():
    """Fix gnaf_pid mismatches between the DB and the geojson files, suburb by suburb."""
    address_db = db.connect_to_db(args)
    for state, suburb_list in suburbs.read_all_suburbs().items():
        for suburb in suburb_list:
            logging.info("Processing %s, %s", suburb.name, state)
            # The DB is the source of truth: map full address name -> gnaf_pid.
            db_addresses = address_db.get_addresses(suburb.name.upper(), state)
            db_lookup = {a.name: a.gnaf_pid for a in db_addresses}
            file_addresses, generated = geojson.read_geojson_file_addresses(suburb.name, state)
            changed = 0
            for address in file_addresses:
                db_gnaf_pid = db_lookup.get(address.name)
                if db_gnaf_pid and db_gnaf_pid != address.gnaf_pid:
                    address.gnaf_pid = db_gnaf_pid
                    changed += 1
            if changed:
                logging.info("Writing %s, %s - updated %d addresses", suburb.name, state, changed)
                geojson.write_geojson_file(suburb.name.upper(), state, file_addresses, generated)
def get_tech_and_upgrade_breakdown(root_dir=".") -> dict:
    """Tally tech-type and upgrade-status for all addresses across every geojson file (slow)."""
    all_tech = Counter()
    all_upgrade = Counter()
    suburb_tech = {state: {} for state in data.STATES}  # [State][Suburb] = Counter()
    filenames = glob.glob(f"{root_dir}/results/**/*.geojson")
    for index, filename in enumerate(filenames):
        info = utils.read_json_file(filename)
        addresses = [geojson.feature_to_address(feature) for feature in info["features"]]
        all_tech.update(address.tech for address in addresses)
        # FTTP addresses are excluded from the upgrade tally.
        all_upgrade.update(address.upgrade for address in addresses if address.tech != "FTTP")
        path_parts = filename.split("/")
        state = path_parts[-2].upper()
        suburb = path_parts[-1].replace(".geojson", "").replace("-", " ").title()
        suburb_tech[state][suburb] = Counter(address.tech for address in addresses)
        if index % 100 == 0:
            utils.print_progress_bar(index, len(filenames), prefix="Progress:", suffix="Complete", length=50)
    return {
        "tech": OrderedDict(all_tech.most_common()),
        "upgrade": OrderedDict(all_upgrade.most_common()),
        "suburb_tech": suburb_tech,
    }
def update_historical_tech_and_upgrade_breakdown():
    """Using git, generate/update a list of tech and upgrade breakdowns over time.

    Walks a separate clone of the repo forward in 7-day steps, checking out the
    last commit before each date and tallying the results tree as it existed
    then. Both output files are saved after every step, so an interrupted run
    loses at most one step's work; dates already present are skipped.
    """
    # use a separate checkout of the repo, so we don't have to worry about uncommitted changes
    checkout_dir = "../new-checkout"
    if not os.path.isdir(checkout_dir):
        # NOTE(review): shell=True with a fixed command string (no untrusted input here)
        subprocess.run(f"git clone git@github.com:LukePrior/nbn-upgrade-map.git {checkout_dir}", check=True, shell=True)
    # starting from ancient history, move forward 7 days at a time
    breakdown_file = "results/breakdown.json"
    breakdowns = utils.read_json_file(breakdown_file, True)
    breakdown_suburbs_file = "results/breakdown-suburbs.json"
    breakdown_suburbs = utils.read_json_file(breakdown_suburbs_file, True)
    co_date = datetime(2023, 5, 23)  # history start date — presumably around the first results commit
    while co_date < datetime.now():
        date_key = co_date.date().isoformat()
        if date_key in breakdowns:
            logging.info("Skipping %s", date_key)
        else:
            logging.info("Processing %s", date_key)
            # check out the last commit on main before this date
            cmd = f"git checkout `git rev-list -n 1 --before=\"{co_date.strftime('%Y-%m-%d %H:%M')}\" main`"
            subprocess.run(cmd, check=True, cwd=checkout_dir, shell=True)
            breakdowns[date_key] = get_tech_and_upgrade_breakdown(checkout_dir)
            # the per-suburb tallies are large, so they live in their own file
            breakdown_suburbs[date_key] = breakdowns[date_key].pop("suburb_tech")
            utils.write_json_file(breakdown_file, breakdowns)  # save each time
            utils.write_json_file(breakdown_suburbs_file, breakdown_suburbs, indent=None)  # save each time
        co_date += timedelta(days=7)
    # print tech+upgrade breakdown
    for key in {"tech", "upgrade"}:
        rows = [{"date": run_date} | breakdowns[run_date][key] for run_date in sorted(breakdowns)]
        print()
        print(tabulate(rows, headers="keys", tablefmt="github"))
def generate_all_suburbs_nbn_tallies():
    """Create a file containing a tally of all suburbs by property (tech, upgrade, etc)."""
    exclude_properties = {"name", "locID", "gnaf_pid"}
    tallies = {}  # property-name -> Counter()
    for _, _, feature in get_all_features():
        for prop, value in feature["properties"].items():
            if prop in exclude_properties:
                continue
            tallies.setdefault(prop, Counter())[value] += 1

    def _parse_quarter(item: tuple[str, int]):
        """Parse a 'Mon YYYY' quarter key into a datetime; unparseable values sort first (epoch)."""
        try:
            return datetime.strptime(item[0], "%b %Y")
        except ValueError:
            return datetime.fromtimestamp(0)

    # sort tallies by frequency, except 'target_eligibility_quarter' which is sorted by date
    sorted_tallies = {}
    for prop, counter in tallies.items():
        if prop == "target_eligibility_quarter":
            sorted_tallies[prop] = OrderedDict(sorted(counter.items(), key=_parse_quarter))
        else:
            sorted_tallies[prop] = OrderedDict(counter.most_common())
    tallies = sorted_tallies

    # Add percentages and a "None" bucket for features missing each property.
    total_count = sum(tallies["tech"].values())  # everything has a tech+NULL
    tallies["percent"] = {}
    for prop, kvs in tallies.items():
        if prop in {"tech", "upgrade", "percent"}:
            continue
        kvs["None"] = total_count - sum(kvs.values())
        tallies["percent"][prop] = {k: f"{100 * v / total_count:.2f}%" for k, v in kvs.items()}
    utils.write_json_file("results/all-suburbs-nbn-tallies.json", tallies, indent=1)
def generate_state_breakdown():
    """Generate results/breakdown.STATE.csv containing history of connection-types by state."""
    output = {}
    all_ctypes = set()
    for date, state_info in utils.read_json_file("results/breakdown-suburbs.json").items():
        logging.info("Processing %s", date)
        output[date] = {}
        for state, suburb_list in state_info.items():
            # Sum every suburb's connection counts into a per-state tally.
            state_tally = {}
            for suburb, connections in suburb_list.items():
                for ctype, ccount in connections.items():
                    state_tally[ctype] = state_tally.get(ctype, 0) + ccount
                    all_ctypes.add(ctype)
            output[date][state] = state_tally
    utils.write_json_file("results/breakdown-state.json", output)
    # Sort connection types so the CSV column order is stable across runs
    # (iterating a raw set is affected by hash randomisation).
    ctypes = sorted(all_ctypes)
    # write CSV per state
    for state in data.STATES:
        rows = [
            {"date": date} | {ctype: output[date].get(state, {}).get(ctype, 0) for ctype in ctypes}
            for date in output
        ]
        if not rows:
            # No history at all: previously rows[0] raised IndexError here.
            continue
        with open(f"results/breakdown.{state}.csv", "w", newline="") as f:
            writer = csv.writer(f)
            writer.writerow(rows[0].keys())
            writer.writerows(r.values() for r in rows)
def fix_fw_tech_type():
    """Rewrite any feature whose tech-type is 'FW' to 'WIRELESS'."""
    for _, _, feature in get_all_features(rewrite_geojson=True):
        properties = feature["properties"]
        if properties["tech"] == "FW":
            properties["tech"] = "WIRELESS"
def fix_fw_tech_type_breakdowns():
    """Fold any 'FW' tech tallies into 'WIRELESS' across all breakdown files."""

    def fix_tech_breakdown(tech):
        """Move any FW values to WIRELESS."""
        if "FW" in tech:
            tech["WIRELESS"] += tech.pop("FW")

    # breakdown.json
    breakdowns = utils.read_json_file("results/breakdown.json")
    for date_info in breakdowns.values():
        fix_tech_breakdown(date_info["tech"])
    utils.write_json_file("results/breakdown.json", breakdowns)

    # breakdown-suburbs.json
    breakdowns = utils.read_json_file("results/breakdown-suburbs.json")
    for date_info in breakdowns.values():
        for suburb_list in date_info.values():
            for breakdown in suburb_list.values():
                fix_tech_breakdown(breakdown)
    utils.write_json_file("results/breakdown-suburbs.json", breakdowns, indent=None)

    # breakdown-state.json and breakdown.STATE.csv (uses breakdown-suburbs.json)
    generate_state_breakdown()
def check_tech_change_status_upgrade():
    """Emit a tally of the upgrade field for all locations that have a tech_change_status."""
    tallies = {}
    for _, _, feature in get_all_features():
        properties = feature["properties"]
        tech_change = properties.get("tech_change_status")
        if not tech_change:
            continue
        tallies.setdefault(tech_change, Counter())[properties.get("upgrade")] += 1
    pprint.pprint(tallies)
def fix_ct_upgrades():
    """Swap tech/upgrade for locations recorded with an XXX_CT upgrade value, then refresh breakdowns."""
    for _, _, feature in get_all_features(rewrite_geojson=True):
        properties = feature["properties"]
        ct_value = properties["upgrade"]
        if ct_value in main.CT_UPGRADE_MAP:
            # e.g. upgrade=FTTN_CT, tech=OTHER  ->  tech=FTTN, upgrade=OTHER
            properties["upgrade"] = properties["tech"]
            properties["tech"] = main.CT_UPGRADE_MAP[ct_value]
    # update breakdown.json and breakdown-suburbs.json
    update_breakdown()
    # update breakdown-state.json and breakdown.STATE.csv
    generate_state_breakdown()
def update_breakdown():
    """Add today's tech/upgrade breakdown (vs the current checkout) and return all breakdowns."""
    breakdown_file = "results/breakdown.json"
    breakdown_suburbs_file = "results/breakdown-suburbs.json"
    breakdowns = utils.read_json_file(breakdown_file, True)
    breakdown_suburbs = utils.read_json_file(breakdown_suburbs_file, True)
    date_key = datetime.now().date().isoformat()
    if date_key in breakdowns:
        logging.info("Skipping %s", date_key)
        return breakdowns
    logging.info("Processing %s", date_key)
    breakdowns[date_key] = get_tech_and_upgrade_breakdown()
    # the per-suburb tallies are large, so they live in their own file
    breakdown_suburbs[date_key] = breakdowns[date_key].pop("suburb_tech")
    utils.write_json_file(breakdown_file, breakdowns)
    utils.write_json_file(breakdown_suburbs_file, breakdown_suburbs, indent=None)
    return breakdowns
def dump_status_tech_upgrade():
    """Print a nested tally: tech_change_status -> tech -> upgrade -> count."""
    tallies = {}  # status -> tech -> upgrade:count
    for _, _, feature in get_all_features():
        properties = feature["properties"]
        status = properties.get("tech_change_status", "?")
        tech = properties["tech"]
        upgrade = properties["upgrade"]
        upgrade_counts = tallies.setdefault(status, {}).setdefault(tech, {})
        upgrade_counts[upgrade] = upgrade_counts.get(upgrade, 0) + 1
    pprint.pprint(tallies)
def generate_local_website():
    """Generate a version of the website with all data local.

    Produces site/index-local.html and site/main-local.js, which read the
    results files from this checkout instead of GitHub and disable features
    (service worker, date selector, analytics) that need a live server.
    """
    # copy index.html -> index-local.html
    with open("./site/index.html") as f:
        index_html = f.read().replace("main.js", "main-local.js")
    with open("./site/index-local.html", "w") as f:
        f.write(index_html)
    # copy main.js -> main-local.js
    with open("./site/main.js") as f:
        gh_prefix = "https://raw.githubusercontent.com/LukePrior/nbn-upgrade-map"
        main_js = (
            f.read()
            # use local results files
            .replace(gh_prefix + "/main/results", "../results")
            .replace(gh_prefix + '/" + commit + "/results', "../results")
            # disable service worker
            .replace("navigator.serviceWorker.", "// ")
            # disable date selector
            .replace("addControlWithHTML('date-selector'", "// ")
            .replace("fetch(commits_url)", "new Promise( () => {} )")
            # disable gtags
            .replace("gtag(", "// gtag(")
        )
    with open("./site/main-local.js", "w") as f:
        f.write(main_js)
    # to view this locally, start a simple web-server with the following command (from the top level directory):
    # python -m http.server 8000
    # and open http://localhost:8000/site/index-local.html
if __name__ == "__main__":
    # Log level is configurable via the LOGLEVEL environment variable (default INFO).
    LOGLEVEL = os.environ.get("LOGLEVEL", "INFO").upper()
    logging.basicConfig(level=LOGLEVEL, format="%(asctime)s %(levelname)s %(threadName)s %(message)s")
    parser = argparse.ArgumentParser(
        description="Run adhoc utility functions to do various maintenence-type activitirs."
    )
    parser.add_argument("run_functions", help="Comma-separated list of no-arg functions to run")
    db.add_db_arguments(parser)
    args = parser.parse_args()
    # Each requested function is looked up by name in this module and called
    # with no arguments; several of them read the module-level `args` above.
    for f in args.run_functions.split(","):
        if f not in globals():
            raise Exception(f"Unknown function: {f}")
        globals()[f]()
| python | MIT | 9bb664084224c03ec53dab555c80adde4d491466 | 2026-01-05T06:50:31.043193Z | false |
LukePrior/nbn-upgrade-map | https://github.com/LukePrior/nbn-upgrade-map/blob/9bb664084224c03ec53dab555c80adde4d491466/code/main.py | code/main.py | #!/usr/bin/env python3
"""Main script for fetching NBN data for a suburb from the NBN API and writing to a GeoJSON file."""
import argparse
import itertools
import logging
import os
import traceback
from collections import Counter
from collections.abc import Generator
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime, timedelta
from threading import Lock
import geojson
import requests
from data import Address, AddressList
from db import AddressDB, add_db_arguments, connect_to_db
from geojson import write_geojson_file
from nbn import NBNApi
from suburbs import (
read_all_suburbs,
update_processed_dates,
update_suburb_in_all_suburbs,
)
from utils import print_progress_bar
# a cache of gnaf_pid -> loc_id mappings (from previous results), and a max-age for that cache
GNAF_PID_TO_LOC: dict[str, str] = {}
# a cache of gnaf_pid -> Address for FTTP addresses (which won't change)
GNAF_PID_TO_FTTP_ADDRESS: dict[str, Address] = {}
# cached loc_ids older than this many days are not reused (see process_suburb)
MAX_LOC_CACHE_AGE_DAYS = 180

# map the 'upgrade' value ending with _CT to a new 'tech' status
# (used by get_address to normalise the NBN API's *_CT altReasonCode values)
CT_UPGRADE_MAP = {
    "FTTN_CT": "FTTN",
    "SAT_CT": "SATELLITE",
    "FTTC_CT": "FTTC",
    "FW_CT": "WIRELESS",
    "FTTB_CT": "FTTB",
    "HFC_CT": "HFC",
}
def select_suburb(target_suburb: str, target_state: str) -> Generator[tuple[str, str], None, None]:
    """Yield (SUBURB, STATE) tuples to process.

    With an explicit suburb+state, yields just that suburb. Otherwise yields
    every never-processed suburb first, then all processed suburbs ordered by
    oldest processed date.
    """
    all_suburbs = read_all_suburbs()

    # 0. An explicit suburb/state yields that suburb only.
    if target_suburb is not None and target_state is not None:
        logging.info("Selecting explicit %s, %s", target_suburb, target_state)
        wanted_name = target_suburb.title()
        wanted_state = target_state.upper()
        for suburb in all_suburbs[wanted_state]:
            if suburb.name == wanted_name:
                yield suburb.name.upper(), wanted_state
                return
        return

    # 1. Suburbs that have never been processed.
    logging.info("Checking for unprocessed suburbs...")
    for state, suburb_list in all_suburbs.items():
        for suburb in suburb_list:
            if suburb.processed_date is None:
                yield suburb.name.upper(), state

    # 2. All processed suburbs, oldest first.
    logging.info("Checking for all suburbs...")
    by_date = {}
    for state, suburb_list in all_suburbs.items():
        for suburb in suburb_list:
            if suburb.processed_date:
                by_date[suburb.processed_date] = (suburb.name.upper(), state)
    for processed_date in sorted(by_date):
        yield by_date[processed_date]
def get_address(nbn: NBNApi, address: Address, get_status=True) -> Address:
    """Return an Address for the given db address, probably augmented with data from the NBN API.

    Mutates and returns the passed-in `address`. Consults two module-level
    caches: GNAF_PID_TO_FTTP_ADDRESS (skips the API entirely for known-FTTP
    locations) and GNAF_PID_TO_LOC (skips the gnaf_pid -> loc_id lookup).
    All errors are logged and swallowed so one bad address cannot abort a run.
    """
    # Check if we have cached FTTP data for this address
    if cached_fttp_address := GNAF_PID_TO_FTTP_ADDRESS.get(address.gnaf_pid):
        # Return the cached FTTP address data (FTTP tech won't change)
        address.loc_id = cached_fttp_address.loc_id
        address.tech = cached_fttp_address.tech
        address.upgrade = cached_fttp_address.upgrade
        address.tech_change_status = cached_fttp_address.tech_change_status
        address.program_type = cached_fttp_address.program_type
        address.target_eligibility_quarter = cached_fttp_address.target_eligibility_quarter
        return address
    try:
        # Resolve the loc_id from cache first, then from the API.
        if loc_id := GNAF_PID_TO_LOC.get(address.gnaf_pid):
            address.loc_id = loc_id
        else:
            address.loc_id = nbn.get_nbn_loc_id(address.gnaf_pid, address.name)
        if address.loc_id and get_status:
            status = nbn.get_nbn_loc_details(address.loc_id)
            address_detail = status["addressDetail"]
            # An altReasonCode in CT_UPGRADE_MAP is mapped back to a real tech
            # type; in that case the API's techType is kept as the upgrade value.
            if address_detail["altReasonCode"] in CT_UPGRADE_MAP:
                address.tech = CT_UPGRADE_MAP[address_detail["altReasonCode"]]
                address.upgrade = address_detail["techType"]
            else:
                address.tech = address_detail["techType"]
                address.upgrade = address_detail.get("altReasonCode", "UNKNOWN")
            address.tech_change_status = address_detail.get("techChangeStatus")
            address.program_type = address_detail.get("programType")
            address.target_eligibility_quarter = address_detail.get("targetEligibilityQuarter")
    except (requests.exceptions.RequestException, ValueError) as err:
        logging.warning("Error fetching NBN data for %s: %s", address.name, err)
    except Exception:
        # gobble all exceptions so we can continue processing!
        logging.warning(traceback.format_exc())
    return address
def get_all_addresses(
    db_addresses: AddressList, max_threads: int = 10, get_status: bool = True, progress_bar: bool = False
) -> AddressList:
    """Fetch all addresses for suburb+state from the DB and then fetch the upgrade+tech details for each address.

    Splits db_addresses into chunks of 200, each processed by one worker
    thread with its own NBNApi instance; progress is reported per sub-chunk.
    executor.map preserves chunk order, so results keep the input order.
    """
    # return list of Address
    chunk_size = 200
    sub_chunk_size = chunk_size // 10  # progress granularity within a chunk
    addresses_completed = 0
    lock = Lock()  # guards addresses_completed across worker threads

    def process_chunk(addresses_chunk: AddressList):
        """Process a chunk of DB addresses, augmenting them with NBN data."""
        nbn = NBNApi()  # one API client per chunk/thread
        results = []
        sub_chunks = (addresses_chunk[i : i + sub_chunk_size] for i in range(0, len(addresses_chunk), sub_chunk_size))
        for sub_chunk in sub_chunks:
            results.extend(get_address(nbn, address, get_status) for address in sub_chunk)
            with lock:
                nonlocal addresses_completed
                addresses_completed += len(sub_chunk)
                if progress_bar:
                    print_progress_bar(
                        addresses_completed, len(db_addresses), prefix="Progress:", suffix="Complete", length=50
                    )
            if not progress_bar:
                logging.info("Completed %d requests", addresses_completed)
        return results

    logging.info("Submitting %d requests to add NBNco data...", len(db_addresses))
    with ThreadPoolExecutor(max_workers=max_threads, thread_name_prefix="nbn") as executor:
        chunks = (db_addresses[i : i + chunk_size] for i in range(0, len(db_addresses), chunk_size))
        chunk_results = executor.map(process_chunk, chunks)
        addresses = list(itertools.chain.from_iterable(chunk_results))
    return addresses
def remove_duplicate_addresses(addresses: AddressList) -> AddressList:
    """Drop addresses that repeat an already-seen (name, locID) pair; gnaf_pid is ignored."""
    seen_keys = set()
    deduped = []
    for candidate in addresses:
        identity = (candidate.name, candidate.loc_id)
        if identity not in seen_keys:
            seen_keys.add(identity)
            deduped.append(candidate)
        else:
            logging.warning("Remove duplicate address: %s", candidate.name)
    return deduped
def process_suburb(
    db: AddressDB,
    state: str,
    suburb: str,
    max_threads: int = 10,
    progress_bar: bool = False,
):
    """Query the DB for addresses, augment them with upgrade+tech details, and write the results to a file.

    Side effects: repopulates the module-level GNAF_PID_TO_LOC and
    GNAF_PID_TO_FTTP_ADDRESS caches, writes the suburb's GeoJSON output file,
    and refreshes the combined all-suburbs file.
    """
    # get addresses from DB
    logging.info("Fetching all addresses for %s, %s", suburb.title(), state)
    db_addresses = db.get_addresses(suburb, state)
    db_addresses.sort(key=lambda k: k.name)
    logging.info("Fetched %d addresses from database", len(db_addresses))
    # if the output file exists already then use it to cache the locID lookup
    global GNAF_PID_TO_LOC, GNAF_PID_TO_FTTP_ADDRESS
    if results := geojson.read_geojson_file(suburb, state):
        file_generated = datetime.fromisoformat(results["generated"])
        # Only trust a previous run's locIDs while it is younger than the cache limit.
        if (datetime.now() - file_generated).days < MAX_LOC_CACHE_AGE_DAYS:
            logging.info("Loaded %d addresses from output file", len(results["features"]))
            GNAF_PID_TO_LOC = {
                feature["properties"]["gnaf_pid"]: feature["properties"]["locID"] for feature in results["features"]
            }
            # Cache FTTP addresses since they won't change
            GNAF_PID_TO_FTTP_ADDRESS = {
                feature["properties"]["gnaf_pid"]: geojson.feature_to_address(feature)
                for feature in results["features"]
                if feature["properties"].get("tech") == "FTTP"
            }
            if GNAF_PID_TO_FTTP_ADDRESS:
                logging.info("Cached %d FTTP addresses (will skip rechecking)", len(GNAF_PID_TO_FTTP_ADDRESS))
    # get NBN data for addresses
    addresses = get_all_addresses(db_addresses, max_threads, progress_bar=progress_bar)
    addresses = remove_duplicate_addresses(addresses)
    # emit some tallies
    tech_tally = Counter(address.tech for address in addresses)
    logging.info("Completed. Tally of tech types: %s", dict(tech_tally))
    # Classify each locID as None / a "LOC..." id / anything else, for diagnostics.
    types = [
        "None" if address.loc_id is None else "LOC" if address.loc_id.startswith("LOC") else "Other"
        for address in addresses
    ]
    loc_tally = Counter(types)
    logging.info("Location ID types: %s", dict(loc_tally))
    write_geojson_file(suburb, state, addresses)
    update_suburb_in_all_suburbs(suburb, state)
def main():
    """Parse command line arguments and start processing selected suburb."""
    arg_parser = argparse.ArgumentParser(
        description="Create GeoJSON files containing FTTP upgrade details for the prescribed suburb."
    )
    arg_parser.add_argument("--suburb", help='The name of a suburb, for example "Bli Bli"')
    arg_parser.add_argument("--state", help='The name of a state, for example "QLD"')
    arg_parser.add_argument(
        "-n",
        "--threads",
        help="The number of threads to use",
        default=10,
        type=int,
        choices=range(1, 41),
    )
    arg_parser.add_argument("--progress", help="Show a progress bar", action=argparse.BooleanOptionalAction)
    arg_parser.add_argument("-t", "--time", help="When on auto mode for how many minutes to process suburbs", type=int)
    add_db_arguments(arg_parser)
    args = arg_parser.parse_args()
    update_processed_dates()
    db = connect_to_db(args)
    # A zero runtime means "process a single suburb, then stop".
    runtime = timedelta(minutes=args.time) if args.time else timedelta()
    started_at = datetime.now()
    deadline = started_at + runtime
    for suburb, state in select_suburb(args.suburb, args.state):
        logging.info("Processing %s, %s", suburb.title(), state)
        if runtime.total_seconds() > 0:
            elapsed = timedelta(seconds=round((datetime.now() - started_at).total_seconds()))
            logging.info("Time elapsed: %s/%s", elapsed, runtime)
        process_suburb(db, state, suburb, args.threads, progress_bar=args.progress)
        if datetime.now() > deadline:
            break
if __name__ == "__main__":
    # Log level can be overridden via the LOGLEVEL environment variable (e.g. DEBUG).
    LOGLEVEL = os.environ.get("LOGLEVEL", "INFO").upper()
    logging.basicConfig(level=LOGLEVEL, format="%(asctime)s %(levelname)s %(threadName)s %(message)s")
    main()
# --- LukePrior/nbn-upgrade-map : code/utils.py ---
import copy
import glob
import json
import os
def print_progress_bar(iteration, total, prefix="", suffix="", decimals=1, length=100, fill="█", print_end="\r"):
    """
    Call in a loop to create terminal progress bar.
    Borrowed from https://stackoverflow.com/questions/3173320/text-progress-bar-in-terminal-with-block-characters
    @params:
        iteration - Required : current iteration (Int)
        total     - Required : total iterations (Int)
        prefix    - Optional : prefix string (Str)
        suffix    - Optional : suffix string (Str)
        decimals  - Optional : positive number of decimals in percent complete (Int)
        length    - Optional : character length of bar (Int)
        fill      - Optional : bar fill character (Str)
        print_end - Optional : end character (e.g. "\r", "\r\n") (Str)
    """
    # Guard against a zero-length job: previously total == 0 raised
    # ZeroDivisionError.  Treat it as already complete.
    if total <= 0:
        iteration = total = 1
    percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
    filled_length = int(length * iteration // total)
    bar = fill * filled_length + "-" * (length - filled_length)
    print(f"\r{prefix} |{bar}| {percent}% {suffix}", end=print_end)
    # Print New Line on Complete
    if iteration == total:
        print()
def write_json_file(filename: str, data: dict, indent=4):
    """Serialise *data* as JSON and write it to *filename* (UTF-8)."""
    with open(filename, "w", encoding="utf-8") as handle:
        handle.write(json.dumps(data, indent=indent))
def read_json_file(filename: str, empty_if_missing=False) -> dict:
    """Read a dict from a JSON file.

    When empty_if_missing is true and the file does not exist, an empty dict
    is returned instead of raising FileNotFoundError.
    """
    file_exists = os.path.exists(filename)
    if empty_if_missing and not file_exists:
        return {}
    with open(filename, encoding="utf-8") as handle:
        return json.load(handle)
def get_all_geojson_files(show_progress: bool = True, rewrite_geojson: bool = False):
    """A generator that returns (filename, geojson_data) for each GeoJSON file in the results directory.

    Args:
        show_progress: print a progress bar while iterating.
        rewrite_geojson: when true, each yielded geojson_data is compared to a
            pristine deep copy after the consumer has processed it; if the
            consumer mutated it, the file is rewritten in place.
    """
    filenames = glob.glob("results/**/*.geojson")
    for n, filename in enumerate(filenames):
        if show_progress and n % 100 == 0:
            print_progress_bar(n, len(filenames), prefix="Progress:", suffix="Complete", length=50)
        geojson_data = read_json_file(filename)
        if rewrite_geojson:
            # take a copy of the GeoJSON, and if it is modified, write it back to the original file
            geojson_data_copy = copy.deepcopy(geojson_data)
            yield filename, geojson_data
            if geojson_data != geojson_data_copy:
                write_json_file(filename, geojson_data, indent=1)
        else:
            yield filename, geojson_data
    # final 100% output
    # Bug fix: previously this printed even when show_progress was False.
    if show_progress:
        print_progress_bar(1, 1, prefix="Progress:", suffix="Complete", length=50)
def get_all_features(show_progress: bool = True, rewrite_geojson: bool = False):
    """A generator that returns (filename, geojson_data, feature) for every Feature in every GeoJSON file."""
    for filename, geojson_data in get_all_geojson_files(show_progress, rewrite_geojson):
        yield from ((filename, geojson_data, feature) for feature in geojson_data["features"])
# --- LukePrior/nbn-upgrade-map : code/suburbs.py ---
# api for managing the list of suburbs, which ones have been completed, etc.
import dataclasses
import glob
import logging
import os
from collections import Counter
from datetime import datetime
import data
import utils
from geojson import (
get_geojson_file_generated,
get_geojson_file_generated_from_name,
read_geojson_file,
)
def write_all_suburbs(all_suburbs: data.SuburbsByState):
    """Serialise the combined all-suburbs structure to results/combined-suburbs.json."""
    def _as_serialisable(suburb: data.Suburb) -> dict:
        """Convert a Suburb dataclass into a JSON-safe dict (date -> ISO string)."""
        record = dataclasses.asdict(suburb)
        if record["processed_date"]:
            record["processed_date"] = record["processed_date"].isoformat()
        return record
    serialised = {
        state: [_as_serialisable(entry) for entry in sorted(suburb_list)]
        for state, suburb_list in sorted(all_suburbs.items())
    }
    utils.write_json_file("results/combined-suburbs.json", serialised, indent=1)
def read_all_suburbs() -> data.SuburbsByState:
    """Load the combined all-suburbs file back into Suburb dataclasses, sorted per state."""
    def _as_suburb(record: dict) -> data.Suburb:
        raw_date = record["processed_date"]
        record["processed_date"] = datetime.fromisoformat(raw_date) if raw_date else None
        record.pop("announced_date", None)  # TODO: remove old field
        return data.Suburb(**record)
    raw = utils.read_json_file("results/combined-suburbs.json")
    # TODO: convert to dict[str, dict[str, data.Suburb]] (state->suburub_name->Suburb)
    return {state: sorted(_as_suburb(record) for record in raw[state]) for state in sorted(raw)}
def update_processed_dates():
    """Check if any new/updated geojson files need to be updated in the all-suburbs file.

    Scans results/<STATE>/*.geojson; any file that is missing from, or newer
    than, the combined file gets its processed date and address count synced.
    Rewrites combined-suburbs.json only if something changed.
    """
    logging.info("Checking for externally updated geojson results...")
    all_suburbs = read_all_suburbs()
    changed = False
    for state in data.STATES:
        # Map each suburb's file-name form ("brisbane-city") to its record.
        file_suburb_map = {suburb.file: suburb for suburb in all_suburbs.get(state, [])}
        for file in glob.glob(f"results/{state}/*.geojson"):
            this_file = os.path.splitext(os.path.basename(file))[0]
            this_suburb = file_suburb_map.get(this_file)
            generated = get_geojson_file_generated(file)
            if this_suburb is None:
                # missing from the combined file: add it
                # NOTE(review): assumes all_suburbs already has a list for this
                # state (KeyError otherwise) — confirm read_all_suburbs always
                # returns every state.
                this_geojson = utils.read_json_file(file)
                this_suburb = data.Suburb(
                    name=this_geojson["suburb"].title(),
                    processed_date=datetime.fromisoformat(this_geojson["generated"]),
                    address_count=len(this_geojson["features"]),
                )
                logging.warning(" Adding suburb from file: %s, %s", this_suburb.name, state)
                all_suburbs[state].append(this_suburb)
                changed = True
            elif this_suburb.processed_date is None or (generated - this_suburb.processed_date).total_seconds() > 0:
                # The geojson file is newer than the combined record: sync it.
                logging.info(" Updating %s/%s processed date %s", state, this_suburb.name, generated)
                this_suburb.processed_date = generated
                # Update address_count to match the actual number of features in the GeoJSON file
                this_geojson = utils.read_json_file(file)
                this_suburb.address_count = len(this_geojson["features"])
                changed = True
    if changed:
        write_all_suburbs(all_suburbs)
    logging.info("...done")
def update_suburb_in_all_suburbs(suburb: str, state: str) -> data.SuburbsByState:
    """Update one suburb's processed date and address count in the combined file.

    Re-reads combined-suburbs.json, refreshes the entry for *suburb*, writes
    the file back, regenerates progress.json, and returns the updated
    structure.
    """
    suburb = suburb.title()
    all_suburbs = read_all_suburbs()
    # NOTE(review): raises StopIteration if the suburb is not already listed
    # for this state — callers appear to rely on it being present.
    found_suburb = next(s for s in all_suburbs[state.upper()] if s.name == suburb)
    found_suburb.processed_date = get_geojson_file_generated_from_name(suburb, state)
    if found_suburb.processed_date is None:
        # No generated timestamp could be read: fall back to "now".
        found_suburb.processed_date = datetime.now()
    # Update address_count to match the actual number of features in the GeoJSON file
    geojson_data = read_geojson_file(suburb, state)
    if geojson_data:
        found_suburb.address_count = len(geojson_data["features"])
    else:
        # If GeoJSON file doesn't exist, set address_count to 0
        logging.warning("GeoJSON file not found for %s, %s - setting address_count to 0", suburb, state)
        found_suburb.address_count = 0
    write_all_suburbs(all_suburbs)
    update_progress()
    return all_suburbs
def _format_percent(numerator: int, denominator: int, default=100.0):
"""Format a percentage as a string."""
return round(numerator / denominator * 100.0, 1) if denominator else default
def _get_completion_progress(suburb_list) -> dict:
    """Return done/total/progress dict for all suburbs in the given list"""
    completed_flags = [suburb.processed_date is not None for suburb in suburb_list]
    done = completed_flags.count(True)
    total = len(completed_flags)
    return {
        "done": done,
        "total": total,
        "percent": _format_percent(done, total),
    }
def _add_total_progress(progress: dict):
    """Add a TOTAL entry to the given progress dict."""
    done_sum = sum(entry["done"] for entry in progress.values())
    total_sum = sum(entry["total"] for entry in progress.values())
    progress["TOTAL"] = {
        "done": done_sum,
        "total": total_sum,
        "percent": _format_percent(done_sum, total_sum),
    }
def get_suburb_progress() -> dict:
    """Calculate a state-by-state progress indicator vs the named list of states+suburbs."""
    by_state = {
        state: _get_completion_progress(suburb_list)
        for state, suburb_list in read_all_suburbs().items()
    }
    _add_total_progress(by_state)
    return {"all": by_state}
def get_address_progress() -> dict:
    """Calculate a state-by-state progress indicator vs the named list of states+suburbs."""
    by_state = {}
    for state, suburb_list in read_all_suburbs().items():
        total = sum(suburb.address_count for suburb in suburb_list)
        done = sum(suburb.address_count for suburb in suburb_list if suburb.processed_date is not None)
        by_state[state] = {
            "done": done,
            "total": total,
            "percent": _format_percent(done, total),
        }
    _add_total_progress(by_state)
    return {"all": by_state}
def get_technology_breakdown() -> dict:
    """Calculate a state-by-state breakdown of technology used."""
    techs = ("FTTN", "FTTP", "FTTB", "FTTC", "HFC", "WIRELESS", "SATELLITE")
    breakdown = {}
    for state, suburb_list in read_all_suburbs().items():
        tally = Counter()
        for suburb in suburb_list:
            tally.update(
                feature["properties"]["tech"]
                for feature in read_geojson_file(suburb.name, state)["features"]
            )
        state_row = {tech: tally.get(tech, 0) for tech in techs}
        state_row["total"] = tally.total()
        breakdown[state] = state_row
    # The TOTAL row sums every column of the per-state rows.
    column_keys = breakdown[next(iter(breakdown))]
    breakdown["TOTAL"] = {
        key: sum(row[key] for row in breakdown.values()) for key in column_keys
    }
    return breakdown
def get_last_updated_breakdown() -> dict:
    """Calculate a state-by-state breakdown of last updated date.

    For each state this reports the OLDEST processed date among its suburbs
    (a state is only as fresh as its stalest suburb), both as a YYYY-MM-DD
    string and as an age in days.  TOTAL is the stalest value across states.
    """
    progress = {"all": {}}
    current_date = datetime.now()
    for state, suburb_list in read_all_suburbs().items():
        oldest_all = min(
            (suburb.processed_date for suburb in suburb_list if suburb.processed_date is not None), default=None
        )
        progress["all"][state] = {
            "date": oldest_all.strftime("%Y-%m-%d") if oldest_all else None,
            "days": (current_date - oldest_all).days if oldest_all else None,
        }
    # ISO date strings sort lexicographically, so min() picks the earliest date;
    # the matching "days" entry is the max (oldest date = biggest age).
    progress["all"]["TOTAL"] = {
        "date": min(
            (progress["all"][state]["date"] for state in progress["all"] if progress["all"][state]["date"] is not None),
            default=None,
        ),
        "days": max(
            (progress["all"][state]["days"] for state in progress["all"] if progress["all"][state]["days"] is not None),
            default=None,
        ),
    }
    return progress
def update_progress():
    """Update the progress.json file with the latest results."""
    summary = {}
    summary["suburbs"] = get_suburb_progress()
    summary["addresses"] = get_address_progress()
    summary["last_updated"] = get_last_updated_breakdown()
    logging.info("Updating progress.json")
    utils.write_json_file("results/progress.json", summary)
    return summary["suburbs"]
# --- LukePrior/nbn-upgrade-map : code/data.py ---
from dataclasses import dataclass
from datetime import datetime
# Full state/territory names mapped to their standard abbreviations.
# Note "ACT" and "Other Territories"/"OT" only exist in short/special forms.
STATES_MAP = {
    "New South Wales": "NSW",
    "ACT": "ACT",
    "Victoria": "VIC",
    "Queensland": "QLD",
    "South Australia": "SA",
    "Western Australia": "WA",
    "Tasmania": "TAS",
    "Northern Territory": "NT",
    "Other Territories": "OT",
}
# Sorted list of the state/territory abbreviations.
STATES = sorted(STATES_MAP.values())
@dataclass(slots=True)
class Address:
    """A single address in a suburb."""
    # Fields populated from the address database:
    name: str  # formatted street address
    gnaf_pid: str  # G-NAF persistent identifier for the address
    longitude: float
    latitude: float
    # Fields filled in later from the NBN API; None until looked up:
    loc_id: str | None = None  # NBN location ID (normally starts with "LOC")
    tech: str | None = None  # technology type, e.g. "FTTP"
    upgrade: str | None = None  # upgrade reason code ("altReasonCode"; "UNKNOWN" if absent)
    tech_change_status: str | None = None
    program_type: str | None = None
    target_eligibility_quarter: str | None = None
# A plain list of Address records.
AddressList = list[Address]
@dataclass(slots=True)
class Suburb:
    """Details about a Suburb."""
    name: str  # suburb name (title case), e.g. "Bli Bli"
    processed_date: datetime | None = None  # when the suburb was last processed (None = never)
    address_count: int = 0  # number of addresses recorded for the suburb
    @property
    def internal(self):
        """Return the "internal" representation of the suburb name, e.g. "Brisbane City" -> "BRISBANE-CITY"."""
        return self.name.upper().replace(" ", "-")
    @property
    def file(self):
        """Return the "file" representation of the suburb name, e.g. "Brisbane City" -> "brisbane-city"."""
        return self.name.lower().replace(" ", "-")
    def __eq__(self, other):
        # Suburbs compare (and sort, below) by name only.
        # NOTE: defining __eq__ without __hash__ leaves instances unhashable.
        return self.name == other.name
    def __lt__(self, other):
        return self.name < other.name
# Suburbs grouped by state abbreviation, e.g. {"QLD": [Suburb(...), ...]}.
SuburbsByState = dict[str, list[Suburb]]
# --- LukePrior/nbn-upgrade-map : code/nbn.py ---
import difflib
import logging
import urllib.parse
import diskcache
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
# 1GB LRU cache of gnaf_pid->loc_id and loc_id->details
# NOTE(review): this single diskcache.Cache is shared by every NBNApi instance
# (and therefore by all worker threads); diskcache documents Cache as
# thread-safe.
CACHE = diskcache.Cache("cache")
class NBNApi:
    """Interacts with NBN's unofficial API.

    Lookups are memoised in the module-level diskcache CACHE:
    address-key -> locID indefinitely, locID -> details for 7 days.
    """
    LOOKUP_URL = "https://places.nbnco.net.au/places/v1/autocomplete?query="
    DETAIL_URL = "https://places.nbnco.net.au/places/v2/details/"
    # Referer header sent with every API request.
    HEADERS = {"referer": "https://www.nbnco.com.au/"}
    def __init__(self):
        self.session = requests.Session()
        # Retry transient failures up to 5 times.  Bug fix: mount the adapter
        # on BOTH schemes — the API URLs above are https, so the previous
        # http://-only mount meant retries were never actually applied.
        retry_adapter = HTTPAdapter(max_retries=(Retry(total=5)))
        self.session.mount("http://", retry_adapter)
        self.session.mount("https://", retry_adapter)
    def close(self):
        """Close the cache."""
        # TODO Each thread that accesses a cache should also call close on the cache.
        CACHE.close()
    def get_nbn_data_json(self, url) -> dict:
        """Gets a JSON response from a URL; raises requests.HTTPError on a bad status."""
        r = self.session.get(url, stream=True, headers=self.HEADERS)
        r.raise_for_status()
        return r.json()
    def get_nbn_loc_id(self, key: str, address: str) -> str:
        """Return the NBN locID for the provided address, or None if there was an error.

        The result is cached under *key* indefinitely.
        """
        if key in CACHE:
            return CACHE[key]
        result = self.get_nbn_data_json(self.LOOKUP_URL + urllib.parse.quote(address))
        suggestions = result.get("suggestions", [])
        # Keep only real "LOC..." suggestions, then rank them by textual
        # similarity to the queried address (best match first).
        suggestions = [s for s in suggestions if "id" in s and s["id"].startswith("LOC")]
        suggestions = sorted(
            suggestions,
            key=lambda s: difflib.SequenceMatcher(None, address, s["formattedAddress"]).ratio(),
            reverse=True,
        )
        if suggestions:
            loc_id = suggestions[0]["id"]
            CACHE[key] = loc_id  # cache indefinitely
            return loc_id
        # In future use the NBN Nearby API with lat/long to get suggestions.
        logging.warning("No valid suggestions for %s", address)
        return None
    def get_nbn_loc_details(self, place_id: str) -> dict:
        """Return the NBN details for the provided id, or None if there was an error.

        Raises ValueError if place_id is not a "LOC..." identifier.
        """
        if place_id in CACHE:
            return CACHE[place_id]
        if not place_id.startswith("LOC"):
            raise ValueError(f"Invalid place_id: {place_id}")
        details = self.get_nbn_data_json(self.DETAIL_URL + place_id)
        CACHE.set(place_id, details, expire=60 * 60 * 24 * 7)  # cache for 7 days
        return details
# --- covid19india/api : src/parser_v3.py ---
#!/usr/bin/env python3
import csv
import logging
import json
import yaml
from collections import defaultdict, OrderedDict
from datetime import datetime, timedelta
from pathlib import Path
# Set logging level
# NOTE(review): NullHandler discards records — presumably real handlers are
# attached elsewhere when this module is run; confirm.
logging.basicConfig(handlers=[logging.NullHandler()],
                    format='%(message)s',
                    level=logging.INFO)
# Current date in India
INDIA_DATE = datetime.strftime(
    datetime.utcnow() + timedelta(hours=5, minutes=30), '%Y-%m-%d')
INPUT_DIR = Path('tmp')
# Contains state codes to be used as API keys
META_DATA = INPUT_DIR / 'misc.json'
# Contains list of geographical districts
DISTRICT_LIST = INPUT_DIR / 'state_district_wise.json'
# All raw_data's
RAW_DATA = 'raw_data{n}.json'
# Contains deaths and recoveries for entries in raw_data1 and raw_data2
OUTCOME_DATA = 'deaths_recoveries{n}.json'
# Contains district data on 26th April
DISTRICT_DATA_GOSPEL = INPUT_DIR / 'csv' / 'latest' / 'districts_26apr_gospel.csv'
GOSPEL_DATE = '2020-04-26'
# India testing data
ICMR_TEST_DATA = INPUT_DIR / 'data.json'
# States testing data
STATE_TEST_DATA = INPUT_DIR / 'state_test_data.json'
# District testing data
DISTRICT_TEST_DATA = INPUT_DIR / 'csv' / 'latest' / 'district_testing.csv'
## For adding metadata
# For state notes and last updated
STATE_WISE = INPUT_DIR / 'data.json'
# For district notes
DISTRICT_WISE = INPUT_DIR / 'state_district_wise.json'
OUTPUT_DIR = Path('tmp', 'v3')
OUTPUT_MIN_DIR = OUTPUT_DIR / 'min'
OUTPUT_DATA_PREFIX = 'data'
OUTPUT_TIMESERIES_FILENAME = 'timeseries'
# Two digit state codes
STATE_CODES = {}
# State codes to state names map (capitalized appropriately)
STATE_NAMES = {}
# State/district populations
STATE_POPULATIONS = {}
DISTRICT_POPULATIONS = defaultdict(dict)
# Code corresponding to MoHFW's 'Unassigned States' in sheet
UNASSIGNED_STATE_CODE = 'UN'
# Dict containing geographical districts
DISTRICTS_DICT = defaultdict(dict)
# District key to give to unknown district values in raw_data
UNKNOWN_DISTRICT_KEY = 'Unknown'
PRIMARY_STATISTICS = ['confirmed', 'deceased', 'recovered']
# Maps sheet 'currentstatus'/'patientstatus' values to output statistic names.
RAW_DATA_MAP = {
    'hospitalized': 'confirmed',
    'deceased': 'deceased',
    'recovered': 'recovered',
    'migrated_other': 'migrated',
}
# Log statements width
PRINT_WIDTH = 70
# Nested default dict of dict
ddict = lambda: defaultdict(ddict)
# Dictionaries which stored final parsed data
data = ddict()
timeseries = ddict()
def parse_state_metadata(raw_data):
    """Populate STATE_CODES, STATE_NAMES and STATE_POPULATIONS from the sheet metadata."""
    for i, entry in enumerate(raw_data['state_meta_data']):
        # State name with sheet capitalization
        state_name = entry['stateut'].strip()
        # State code caps
        state_code = entry['abbreviation'].strip().upper()
        STATE_CODES[state_name.lower()] = state_code
        STATE_NAMES[state_code] = state_name
        # State population (skipped, with a warning, when not a valid integer)
        try:
            STATE_POPULATIONS[state_code] = int(entry['population'].strip())
        except ValueError:
            if entry['population']:
                logging.warning('[L{}] [Bad population: {}] {}'.format(
                    i + 2, entry['population'], state_code))
def parse_district_list(raw_data):
    """Fill DISTRICTS_DICT with each state's known districts (lowercased key -> sheet form)."""
    for i, entry in enumerate(raw_data.values()):
        state = entry['statecode'].strip().upper()
        if state not in STATE_CODES.values():
            logging.warning('[L{}] Bad state: {}'.format(i + 2, entry['statecode']))
        elif 'districtData' in entry:
            for district_name in entry['districtData']:
                district_name = district_name.strip()
                DISTRICTS_DICT[state][district_name.lower()] = district_name
def parse_district(district, state):
    """Normalize a district name for *state*; returns (name, whether it was recognized)."""
    district = district.strip()
    lowered = district.lower()
    if not district or lowered == 'unknown':
        # Empty or explicitly-unknown districts get the sentinel key.
        return UNKNOWN_DISTRICT_KEY, True
    if lowered in DISTRICTS_DICT[state]:
        # Known district: return the canonical sheet capitalization.
        return DISTRICTS_DICT[state][lowered], True
    return district, False
def parse_district_metadata(raw_data):
    """Populate DISTRICT_POPULATIONS[state][district] from the sheet metadata."""
    for i, entry in enumerate(raw_data['district_meta_data']):
        # State code
        state = entry['statecode'].strip().upper()
        if state not in STATE_CODES.values():
            logging.warning('[L{}] Bad state: {}'.format(i + 2, state))
            continue
        # District name with sheet capitalization
        district, expected = parse_district(entry['district'], state)
        if not expected:
            logging.warning('[L{}] [{}] Unexpected district: {}'.format(
                i + 2, state, district))
        # District population (row skipped, with a warning, when not a valid integer)
        try:
            population = int(entry['population'].strip())
        except ValueError:
            if entry['population']:
                logging.warning('[L{}] [Bad population: {}] {}: {}'.format(
                    i + 2, entry['population'], state, district))
            continue
        DISTRICT_POPULATIONS[state][district] = population
def inc(ref, key, count):
    """Add *count* to ref[key], treating any non-int current value as zero."""
    current = ref[key]
    if isinstance(current, int):
        ref[key] = current + count
    else:
        # First write: a nested defaultdict returns a dict here, not a number.
        ref[key] = count
def parse(raw_data, i):
    """Accumulate per-day case deltas from raw_data{i} into the module `data` dict.

    Writes data[date][state]['delta'][statistic] (plus the 'TT' all-India row
    and, for sheets v3+ after GOSPEL_DATE, per-district rows).  Malformed rows
    are logged and skipped.
    """
    for j, entry in enumerate(raw_data['raw_data']):
        state_name = entry['detectedstate'].strip().lower()
        try:
            state = STATE_CODES[state_name]
        except KeyError:
            # Entries with empty state names are discarded
            if state_name:
                # Unrecognized state entries are discarded and logged
                logging.warning('[L{}] [{}] [Bad state: {}] {}'.format(
                    j + 2, entry['dateannounced'], entry['detectedstate'],
                    entry['numcases']))
            continue
        try:
            fdate = datetime.strptime(entry['dateannounced'].strip(), '%d/%m/%Y')
            date = datetime.strftime(fdate, '%Y-%m-%d')
            if date > INDIA_DATE:
                # Entries from future dates will be ignored
                logging.warning('[L{}] [Future date: {}] {}: {} {}'.format(
                    j + 2, entry['dateannounced'], entry['detectedstate'],
                    entry['detecteddistrict'], entry['numcases']))
                continue
        except ValueError:
            # Bad date
            logging.warning('[L{}] [Bad date: {}] {}: {} {}'.format(
                j + 2, entry['dateannounced'], entry['detectedstate'],
                entry['detecteddistrict'], entry['numcases']))
            continue
        district, expected = parse_district(entry['detecteddistrict'], state)
        if not expected:
            # Print unexpected district names
            logging.warning('[L{}] [{}] [Unexpected district: {} ({})] {}'.format(
                j + 2, entry['dateannounced'], district, state, entry['numcases']))
        try:
            count = int(entry['numcases'].strip())
        except ValueError:
            logging.warning('[L{}] [{}] [Bad numcases: {}] {}: {}'.format(
                j + 2, entry['dateannounced'], entry['numcases'], state, district))
            continue
        if count:
            try:
                # All rows in v1 and v2 are confirmed cases
                statistic = 'confirmed' if i < 3 else RAW_DATA_MAP[
                    entry['currentstatus'].strip().lower()]
                inc(data[date]['TT']['delta'], statistic, count)
                inc(data[date][state]['delta'], statistic, count)
                # Don't parse old district data since it's unreliable
                if i > 2 and date > GOSPEL_DATE and state != UNASSIGNED_STATE_CODE:
                    inc(data[date][state]['districts'][district]['delta'], statistic,
                        count)
            except KeyError:
                # Unrecognized status
                logging.warning('[L{}] [{}] [Bad currentstatus: {}] {}: {} {}'.format(
                    j + 2, entry['dateannounced'], entry['currentstatus'], state,
                    district, entry['numcases']))
def parse_outcome(outcome_data, i):
    """Accumulate deceased/recovered deltas from deaths_recoveries{i} (one row = one person).

    Writes into data[date]['TT'|state]['delta'][statistic].  Malformed rows
    are logged and skipped.
    """
    for j, entry in enumerate(outcome_data['deaths_recoveries']):
        state_name = entry['state'].strip().lower()
        try:
            state = STATE_CODES[state_name]
        except KeyError:
            # Entries with empty state names are discarded
            if state_name:
                # Unrecognized state entries are discarded and logged
                logging.warning('[L{}] [{}] [Bad state: {}]'.format(
                    j + 2, entry['date'], entry['state']))
            continue
        try:
            fdate = datetime.strptime(entry['date'].strip(), '%d/%m/%Y')
            date = datetime.strftime(fdate, '%Y-%m-%d')
            if date > INDIA_DATE:
                # Entries from future dates will be ignored
                logging.warning('[L{}] [Future date: {}] {}'.format(
                    j + 2, entry['date'], state))
                continue
        except ValueError:
            # Bad date
            logging.warning('[L{}] [Bad date: {}] {}'.format(j + 2, entry['date'],
                                                             state))
            continue
        district, expected = parse_district(entry['district'], state)
        if not expected:
            # Print unexpected district names.
            # Bug fix: use .get() — deaths_recoveries rows may not carry a
            # 'numcases' column, so entry['numcases'] could raise KeyError here.
            logging.warning('[L{}] [{}] [Unexpected district: {} ({})] {}'.format(
                j + 2, entry['date'], district, state, entry.get('numcases', '')))
        try:
            statistic = RAW_DATA_MAP[entry['patientstatus'].strip().lower()]
            inc(data[date]['TT']['delta'], statistic, 1)
            inc(data[date][state]['delta'], statistic, 1)
            ## Don't parse old district data since it's unreliable
            # inc(data[date][state]['districts'][district]['delta'], statistic,
            #     1)
        except KeyError:
            # Unrecognized status
            logging.warning('[L{}] [{}] [Bad patientstatus: {}] {}: {}'.format(
                j + 2, entry['date'], entry['patientstatus'], state, district))
def parse_district_gospel(reader):
    """Seed district cumulative totals as of GOSPEL_DATE from the gospel CSV (a csv.DictReader)."""
    for i, row in enumerate(reader):
        state = row['State_Code'].strip().upper()
        if state not in STATE_CODES.values():
            logging.warning('[{}] Bad state: {}'.format(i, state))
            continue
        district, expected = parse_district(row['District'], state)
        if not expected:
            # Print unexpected district names
            logging.warning('[{}] Unexpected district: {} {}'.format(
                i, state, district))
        # Set cumulative totals for confirmed/deceased/recovered; empty cells count as 0.
        for statistic in PRIMARY_STATISTICS:
            count = int(row[statistic.capitalize()] or 0)
            if count:
                data[GOSPEL_DATE][state]['districts'][district]['total'][
                    statistic] = count
def parse_icmr(icmr_data):
    """Record all-India cumulative tested counts (with source metadata) under data[date]['TT']."""
    for j, entry in enumerate(icmr_data['tested']):
        count_str = entry['totalsamplestested'].strip()
        try:
            fdate = datetime.strptime(entry['testedasof'].strip(), '%d/%m/%Y')
            date = datetime.strftime(fdate, '%Y-%m-%d')
            if date > INDIA_DATE:
                # Entries from future dates will be ignored
                if count_str:
                    # Log non-zero entries
                    logging.warning('[L{}] [Future date: {}]'.format(
                        j + 2, entry['testedasof']))
                continue
        except ValueError:
            # Bad timestamp
            logging.warning('[L{}] [Bad date: {}]'.format(j + 2,
                                                          entry['testedasof']))
            continue
        try:
            count = int(count_str)
        except ValueError:
            logging.warning('[L{}] [{}] [Bad totalsamplestested: {}]'.format(
                j + 2, entry['testedasof'], entry['totalsamplestested']))
            continue
        if count:
            # Cumulative total plus where/when the number came from.
            data[date]['TT']['total']['tested'] = count
            data[date]['TT']['meta']['tested']['source'] = entry['source'].strip()
            data[date]['TT']['meta']['tested']['last_updated'] = date
def parse_state_test(raw_data):
    """Record per-state cumulative tested counts (with source metadata) into `data`."""
    for j, entry in enumerate(raw_data['states_tested_data']):
        count_str = entry['totaltested'].strip()
        try:
            fdate = datetime.strptime(entry['updatedon'].strip(), '%d/%m/%Y')
            date = datetime.strftime(fdate, '%Y-%m-%d')
            if date > INDIA_DATE:
                # Entries from future dates will be ignored
                if count_str:
                    # Log non-zero entries
                    logging.warning('[L{}] [Future date: {}] {}'.format(
                        j + 2, entry['updatedon'], entry['state']))
                continue
        except ValueError:
            # Bad date
            logging.warning('[L{}] [Bad date: {}] {}'.format(j + 2,
                                                             entry['updatedon'],
                                                             entry['state']))
            continue
        state_name = entry['state'].strip().lower()
        try:
            state = STATE_CODES[state_name]
        except KeyError:
            # Entries having unrecognized state names are discarded
            logging.warning('[L{}] [{}] [Bad state: {}]'.format(
                j + 2, entry['updatedon'], entry['state']))
            continue
        try:
            count = int(count_str)
        except ValueError:
            logging.warning('[L{}] [{}] [Bad totaltested: {}] {}'.format(
                j + 2, entry['updatedon'], entry['totaltested'], entry['state']))
            continue
        if count:
            data[date][state]['total']['tested'] = count
            # The source1 column is used as the authoritative source link.
            data[date][state]['meta']['tested']['source'] = entry['source1'].strip()
            data[date][state]['meta']['tested']['last_updated'] = date
def column_str(n):
    """Convert a 1-based column number into spreadsheet letters (1 -> 'A', 27 -> 'AA')."""
    letters = []
    while n > 0:
        n, remainder = divmod(n - 1, 26)
        letters.append(chr(65 + remainder))
    return ''.join(reversed(letters))
def parse_district_test(reader):
    """Record per-district cumulative tested counts from the district testing CSV.

    The sheet layout: fixed columns, then repeating 5-column groups per date
    starting at column 6: | Tested | Positive | Negative | Source1 | Source2 |.
    """
    # Header row
    header = next(reader)
    # Store formatted dates
    dates = ['' for _ in header]
    # Columns >= 6 contain dates
    for j in range(6, len(header), 5):
        try:
            fdate = datetime.strptime(header[j].strip(), '%d/%m/%Y')
            date = datetime.strftime(fdate, '%Y-%m-%d')
            if date <= INDIA_DATE:
                # Only keep entries from present or past dates
                dates[j] = date
        except ValueError:
            # Bad date
            logging.warning('[{}] Bad date: {}'.format(column_str(j), header[j]))
    # Skip second row
    next(reader)
    for i, row in enumerate(reader):
        # Column 3 contains state name
        state_name = row[3].strip().lower()
        try:
            state = STATE_CODES[state_name]
        except KeyError:
            # Entries having unrecognized state names are discarded
            logging.warning('[L{}] Bad state: {}'.format(i + 3, row[3]))
            continue
        # Column 4 contains district name
        district, expected = parse_district(row[4], state)
        if not expected:
            # Print unexpected district names
            logging.warning('[L{}] Unexpected district: {} {}'.format(
                i + 3, state, district))
        # Testing data starts from column 6
        for j in range(6, len(row), 5):
            # Date header (empty means the column was a future/bad date)
            date = dates[j]
            if not date:
                # Skip future date
                continue
            # | Tested | Positive | Negative | Source1 | Source2 |
            try:
                count = int(row[j].strip())
            except ValueError:
                if row[j]:
                    logging.warning('[L{} {}] [{}: {}] Bad Tested: {}'.format(
                        i + 3, column_str(j), state, district, row[j]))
                continue
            # Use Source1 key as source
            source = row[j + 3].strip()
            if count:
                data[date][state]['districts'][district]['total']['tested'] = count
                data[date][state]['districts'][district]['meta']['tested'][
                    'source'] = source
                data[date][state]['districts'][district]['meta']['tested'][
                    'last_updated'] = date
def fill_tested():
  """Derive daily 'tested' deltas and forward-fill missing cumulatives.

  For each date (in order): delta starts as today's cumulative, then has
  yesterday's cumulative subtracted. When today's cumulative is missing,
  yesterday's value and its source metadata are carried forward, which
  leaves the delta untouched (effectively zero for that day).
  """
  dates = sorted(data)
  for i, date in enumerate(dates):
    curr_data = data[date]
    # Initialize today's delta with today's cumulative
    for state, state_data in curr_data.items():
      if 'total' in state_data and 'tested' in state_data['total']:
        state_data['delta']['tested'] = state_data['total']['tested']
      if 'districts' not in state_data:
        continue
      for district, district_data in state_data['districts'].items():
        if 'total' in district_data and 'tested' in district_data['total']:
          district_data['delta']['tested'] = district_data['total']['tested']
    if i > 0:
      # Compare against the previous available date
      prev_date = dates[i - 1]
      prev_data = data[prev_date]
      for state, state_data in prev_data.items():
        if 'total' in state_data and 'tested' in state_data['total']:
          if 'tested' in curr_data[state]['total']:
            # Subtract previous cumulative to get delta
            curr_data[state]['delta']['tested'] -= state_data['total'][
                'tested']
          else:
            # Take today's cumulative to be same as yesterday's
            # cumulative if today's cumulative is missing
            curr_data[state]['total']['tested'] = state_data['total']['tested']
            curr_data[state]['meta']['tested']['source'] = state_data['meta'][
                'tested']['source']
            curr_data[state]['meta']['tested']['last_updated'] = state_data[
                'meta']['tested']['last_updated']
        if 'districts' not in state_data:
          continue
        # Same delta/forward-fill logic at district granularity
        for district, district_data in state_data['districts'].items():
          if 'total' in district_data and 'tested' in district_data['total']:
            if 'tested' in curr_data[state]['districts'][district]['total']:
              # Subtract previous cumulative to get delta
              curr_data[state]['districts'][district]['delta'][
                  'tested'] -= district_data['total']['tested']
            else:
              # Take today's cumulative to be same as yesterday's
              # cumulative if today's cumulative is missing
              curr_data[state]['districts'][district]['total'][
                  'tested'] = district_data['total']['tested']
              curr_data[state]['districts'][district]['meta']['tested'][
                  'source'] = district_data['meta']['tested']['source']
              curr_data[state]['districts'][district]['meta']['tested'][
                  'last_updated'] = district_data['meta']['tested'][
                      'last_updated']
def accumulate(start_after_date='', end_date='3020-01-30'):
  """Build cumulative totals by rolling deltas forward day by day.

  Processes dates strictly after `start_after_date` up to and including
  `end_date` (defaults cover all dates). Each day's total is seeded from
  the previous day's total, then today's deltas are added on top.
  District-level accumulation is skipped on/before GOSPEL_DATE because
  that data is already cumulative.
  """
  dates = sorted(data)
  for i, date in enumerate(dates):
    if date <= start_after_date:
      continue
    elif date > end_date:
      break
    curr_data = data[date]
    if i > 0:
      # Initialize today's cumulative with previous available
      prev_date = dates[i - 1]
      prev_data = data[prev_date]
      for state, state_data in prev_data.items():
        for statistic in RAW_DATA_MAP.values():
          if statistic in state_data['total']:
            inc(curr_data[state]['total'], statistic,
                state_data['total'][statistic])
        if 'districts' not in state_data or date <= GOSPEL_DATE:
          # Old district data is already accumulated
          continue
        for district, district_data in state_data['districts'].items():
          for statistic in RAW_DATA_MAP.values():
            if statistic in district_data['total']:
              inc(curr_data[state]['districts'][district]['total'], statistic,
                  district_data['total'][statistic])
    # Add today's dailys to today's cumulative
    for state, state_data in curr_data.items():
      if 'delta' in state_data:
        for statistic in RAW_DATA_MAP.values():
          if statistic in state_data['delta']:
            inc(state_data['total'], statistic, state_data['delta'][statistic])
      if 'districts' not in state_data or date <= GOSPEL_DATE:
        # Old district data is already accumulated
        continue
      for district, district_data in state_data['districts'].items():
        if 'delta' in district_data:
          for statistic in RAW_DATA_MAP.values():
            if statistic in district_data['delta']:
              inc(district_data['total'], statistic,
                  district_data['delta'][statistic])
def fill_gospel_unknown():
  """Backfill the 'Unknown' district on GOSPEL_DATE.

  The gospel sheet has no Unknown rows, so wherever a state total on that
  date exceeds the sum of its district totals, the difference is assigned
  to UNKNOWN_DISTRICT_KEY so district sums match the state.
  """
  # Gospel doesn't contain unknowns
  # Fill them based on gospel date state counts
  curr_data = data[GOSPEL_DATE]
  for state, state_data in curr_data.items():
    if 'districts' not in state_data or 'total' not in state_data:
      # State had no cases yet
      continue
    # Sum district totals per statistic for comparison with the state total
    sum_district_totals = defaultdict(lambda: 0)
    for district, district_data in state_data['districts'].items():
      if 'total' in district_data:
        for statistic, count in district_data['total'].items():
          sum_district_totals[statistic] += count
    for statistic in PRIMARY_STATISTICS:
      if statistic in state_data['total']:
        count = state_data['total'][statistic]
        if count != sum_district_totals[statistic]:
          # Counts don't match
          # We take Unknown district values = State - Sum(districts gospel)
          state_data['districts'][UNKNOWN_DISTRICT_KEY]['total'][
              statistic] = count - sum_district_totals[statistic]
def stripper(raw_data, dtype=ddict):
  """Return a pruned copy of `raw_data` with all falsy entries removed.

  Nested dicts are pruned recursively (into containers built by `dtype`);
  a branch that becomes empty after pruning is dropped entirely.
  """
  pruned = dtype()
  for key, value in raw_data.items():
    value = stripper(value, dtype) if isinstance(value, dict) else value
    if value:
      pruned[key] = value
  return pruned
def add_populations():
  """Attach known populations to each state/district 'meta' entry in `data`."""
  for day_data in data.values():
    for state, state_data in day_data.items():
      # States without a known population are skipped silently
      if state in STATE_POPULATIONS:
        state_data['meta']['population'] = STATE_POPULATIONS[state]
      for district, district_data in state_data.get('districts', {}).items():
        # Districts without a known population are skipped silently
        if district in DISTRICT_POPULATIONS[state]:
          district_data['meta']['population'] = DISTRICT_POPULATIONS[state][
              district]
def generate_timeseries(districts=False):
  """Pivot `data` (date -> state) into `timeseries` (state -> date).

  Copies 'total' and 'delta' statistics per date. District timeseries
  are included only when `districts` is True, and only for dates after
  GOSPEL_DATE.
  """
  for date in sorted(data):
    curr_data = data[date]
    for state, state_data in curr_data.items():
      for stype in ['total', 'delta']:
        if stype in state_data:
          for statistic, value in state_data[stype].items():
            timeseries[state][date][stype][statistic] = value
      if not districts or 'districts' not in state_data or date <= GOSPEL_DATE:
        # Total state has no district data
        # District timeseries starts only from 26th April
        continue
      for district, district_data in state_data['districts'].items():
        for stype in ['total', 'delta']:
          if stype in district_data:
            for statistic, value in district_data[stype].items():
              timeseries[state]['districts'][district][date][stype][
                  statistic] = value
def add_state_meta(raw_data):
  """Add per-state 'last_updated' timestamps and notes to the latest date.

  Reads the 'statewise' list from the sheet JSON; entries with bad state
  codes or unparseable timestamps are logged and skipped.
  """
  # Only the most recent date in `data` receives metadata
  last_data = data[sorted(data)[-1]]
  for j, entry in enumerate(raw_data['statewise']):
    state = entry['statecode'].strip().upper()
    if state not in STATE_CODES.values():
      # Entries having unrecognized state codes are discarded
      logging.warning('[L{}] [{}] Bad state: {}'.format(
          j + 2, entry['lastupdatedtime'], entry['statecode']))
      continue
    try:
      fdate = datetime.strptime(entry['lastupdatedtime'].strip(),
                                '%d/%m/%Y %H:%M:%S')
    except ValueError:
      # Bad timestamp
      logging.warning('[L{}] [Bad timestamp: {}] {}'.format(
          j + 2, entry['lastupdatedtime'], state))
      continue
    # Timestamps are sheet-local IST, hence the fixed +05:30 offset
    last_data[state]['meta']['last_updated'] = fdate.isoformat() + '+05:30'
    if entry['statenotes']:
      last_data[state]['meta']['notes'] = entry['statenotes'].strip()
def add_district_meta(raw_data):
  """Add per-district notes (from districtData) to the latest date in `data`."""
  # Only the most recent date in `data` receives metadata
  last_data = data[sorted(data)[-1]]
  for j, entry in enumerate(raw_data.values()):
    state = entry['statecode'].strip().upper()
    if state not in STATE_CODES.values():
      # Entries having unrecognized state codes are discarded
      logging.warning('[L{}] Bad state: {}'.format(j + 2, entry['statecode']))
      continue
    for district, district_data in entry['districtData'].items():
      # Canonicalize the district name; warn on unknown names but keep going
      district, expected = parse_district(district, state)
      if not expected:
        logging.warning('[L{}] Unexpected district: {} {}'.format(
            j + 2, state, district))
      if district_data['notes']:
        last_data[state]['districts'][district]['meta'][
            'notes'] = district_data['notes'].strip()
def tally_statewise(raw_data):
  """Cross-check parsed state counts against the 'statewise' sheet.

  First logs states present in parsed data but absent from the sheet,
  then logs any total/delta mismatch per statistic. Purely diagnostic:
  nothing in `data` is modified.
  """
  last_data = data[sorted(data)[-1]]
  # Check for extra entries
  logging.info('Checking for extra entries...')
  for state, state_data in last_data.items():
    found = False
    for entry in raw_data['statewise']:
      if state == entry['statecode'].strip().upper():
        found = True
        break
    if not found:
      # Dump the orphaned state subtree for inspection
      logging.warning(yaml.dump(stripper({state: state_data}, dtype=dict)))
  logging.info('Done!')
  # Tally counts of entries present in statewise
  logging.info('Tallying final date counts...')
  for j, entry in enumerate(raw_data['statewise']):
    state = entry['statecode'].strip().upper()
    if state not in STATE_CODES.values():
      continue
    try:
      fdate = datetime.strptime(entry['lastupdatedtime'].strip(),
                                '%d/%m/%Y %H:%M:%S')
    except ValueError:
      # Bad timestamp
      logging.warning('[L{}] [Bad timestamp: {}] {}'.format(
          j + 2, entry['lastupdatedtime'], state))
      continue
    for statistic in PRIMARY_STATISTICS:
      # Sheet uses 'deaths'/'deltadeaths' where the parser says 'deceased'
      # NOTE(review): in the 'delta' expression below, .strip() applies to
      # the key literal rather than the sheet value; harmless since int()
      # tolerates surrounding whitespace, but likely not what was intended.
      try:
        values = {
            'total':
                int(entry[statistic if statistic != 'deceased' else 'deaths'].
                    strip()),
            'delta':
                int(entry['delta' + (
                    statistic if statistic != 'deceased' else 'deaths').strip()])
        }
      except ValueError:
        logging.warning('[L{}] [{}] [Bad value for {}] {}'.format(
            j + 2, entry['lastupdatedtime'], statistic, state))
        continue
      for stype in ['total', 'delta']:
        if values[stype]:
          parsed_value = last_data[state][stype][statistic]
          if not isinstance(parsed_value, int):
            # Missing parsed values (empty ddict nodes) count as zero
            parsed_value = 0
          if values[stype] != parsed_value:
            # Print mismatch between statewise and parser
            logging.warning('{} {} {}: (sheet: {}, parser: {})'.format(
                state, statistic, stype, values[stype], parsed_value))
def tally_districtwise(raw_data):
  """Cross-check parsed district counts against the districtwise sheet.

  Logs parsed states/districts missing from the sheet, then logs any
  total/delta mismatch per statistic. Purely diagnostic: nothing in
  `data` is modified.
  """
  last_data = data[sorted(data)[-1]]
  # Check for extra entries
  logging.info('Checking for extra entries...')
  for state, state_data in last_data.items():
    if 'districts' not in state_data:
      continue
    # Sheet is keyed by full state name, not state code
    state_name = STATE_NAMES[state]
    if state_name in raw_data:
      for district, district_data in state_data['districts'].items():
        found = False
        for entryDistrict in raw_data[state_name]['districtData'].keys():
          entryDistrict, _ = parse_district(entryDistrict, state)
          if district == entryDistrict:
            found = True
            break
        if not found:
          # Dump the orphaned district subtree for inspection
          key = '{} ({})'.format(district, state)
          logging.warning(yaml.dump(stripper({key: district_data},
                                             dtype=dict)))
    else:
      logging.warning(yaml.dump(stripper({state: state_data}, dtype=dict)))
  logging.info('Done!')
  # Tally counts of entries present in districtwise
  logging.info('Tallying final date counts...')
  for j, entry in enumerate(raw_data.values()):
    state = entry['statecode'].strip().upper()
    if state not in STATE_CODES.values():
      continue
    for district, district_data in entry['districtData'].items():
      district, _ = parse_district(district, state)
      for statistic in PRIMARY_STATISTICS:
        values = {
            'total': district_data[statistic],
            'delta': district_data['delta'][statistic]
        }
        for stype in ['total', 'delta']:
          if values[stype]:
            parsed_value = last_data[state]['districts'][district][stype][
                statistic]
            if not isinstance(parsed_value, int):
              # Missing parsed values (empty ddict nodes) count as zero
              parsed_value = 0
            if values[stype] != parsed_value:
              # Print mismatch between districtwise and parser
              logging.warning('{} {} {} {}: (sheet: {}, parser: {})'.format(
                  state, district, statistic, stype, values[stype],
                  parsed_value))
if __name__ == '__main__':
logging.info('-' * PRINT_WIDTH)
logging.info('{:{align}{width}}'.format('PARSER V3 START',
align='^',
width=PRINT_WIDTH))
# Get possible state codes, populations
logging.info('-' * PRINT_WIDTH)
logging.info('Parsing state metadata...')
with open(META_DATA, 'r') as f:
logging.info('File: {}'.format(META_DATA.name))
raw_data = json.load(f)
parse_state_metadata(raw_data)
logging.info('Done!')
# Get all actual district names
logging.info('-' * PRINT_WIDTH)
logging.info('Parsing districts list...')
with open(DISTRICT_LIST, 'r') as f:
logging.info('File: {}'.format(DISTRICT_LIST.name))
raw_data = json.load(f)
parse_district_list(raw_data)
logging.info('Done!')
# Get district populations
logging.info('-' * PRINT_WIDTH)
logging.info('Parsing district metadata...')
with open(META_DATA, 'r') as f:
logging.info('File: {}'.format(META_DATA.name))
raw_data = json.load(f)
parse_district_metadata(raw_data)
logging.info('Done!')
# Parse raw_data's
logging.info('-' * PRINT_WIDTH)
logging.info('Parsing raw_data...')
i = 1
while True:
f = INPUT_DIR / RAW_DATA.format(n=i)
if not f.is_file():
break
with open(f, 'r') as f:
logging.info('File: {}'.format(RAW_DATA.format(n=i)))
raw_data = json.load(f)
parse(raw_data, i)
i += 1
logging.info('Done!')
# Parse additional deceased/recovered info not in raw_data 1 and 2
logging.info('-' * PRINT_WIDTH)
logging.info('Parsing deaths_recoveries...')
for i in [1, 2]:
f = INPUT_DIR / OUTCOME_DATA.format(n=i)
with open(f, 'r') as f:
logging.info('File: {}'.format(OUTCOME_DATA.format(n=i)))
raw_data = json.load(f)
parse_outcome(raw_data, i)
logging.info('Done!')
logging.info('-' * PRINT_WIDTH)
logging.info('Adding district data for 26th April...')
# Parse gospel district data for 26th April
with open(DISTRICT_DATA_GOSPEL, 'r') as f:
logging.info('File: {}'.format(DISTRICT_DATA_GOSPEL.name))
reader = csv.DictReader(f)
parse_district_gospel(reader)
logging.info('Done!')
logging.info('-' * PRINT_WIDTH)
logging.info('Parsing ICMR test data for India...')
f = ICMR_TEST_DATA
with open(f, 'r') as f:
logging.info('File: {}'.format(ICMR_TEST_DATA.name))
raw_data = json.load(f, object_pairs_hook=OrderedDict)
parse_icmr(raw_data)
logging.info('Done!')
logging.info('-' * PRINT_WIDTH)
logging.info('Parsing test data for all states...')
f = STATE_TEST_DATA
with open(f, 'r') as f:
logging.info('File: {}'.format(STATE_TEST_DATA.name))
raw_data = json.load(f, object_pairs_hook=OrderedDict)
parse_state_test(raw_data)
logging.info('Done!')
logging.info('-' * PRINT_WIDTH)
logging.info('Parsing test data for districts...')
f = DISTRICT_TEST_DATA
with open(f, 'r') as f:
logging.info('File: {}'.format(DISTRICT_TEST_DATA.name))
reader = csv.reader(f)
parse_district_test(reader)
logging.info('Done!')
# Fill delta values for tested
logging.info('-' * PRINT_WIDTH)
logging.info('Generating daily tested values...')
fill_tested()
logging.info('Done!')
# Generate total (cumulative) data points till 26th April
logging.info('-' * PRINT_WIDTH)
logging.info('Generating cumulative CRD values till 26th April...')
accumulate(end_date=GOSPEL_DATE)
logging.info('Done!')
# Fill Unknown district counts for 26th April
logging.info('-' * PRINT_WIDTH)
logging.info(
'Filling {} data for 26th April...'.format(UNKNOWN_DISTRICT_KEY))
fill_gospel_unknown()
logging.info('Done!')
# Generate rest of total (cumulative) data points
logging.info('-' * PRINT_WIDTH)
logging.info(
'Generating cumulative CRD values from 26th April afterwards...')
accumulate(start_after_date=GOSPEL_DATE)
logging.info('Done!')
# Strip empty values ({}, 0, '', None)
logging.info('-' * PRINT_WIDTH)
logging.info('Stripping empty values...')
data = stripper(data)
logging.info('Done!')
# Add population figures
logging.info('-' * PRINT_WIDTH)
logging.info('Adding state/district populations...')
add_populations()
logging.info('Done!')
# Generate timeseries
logging.info('-' * PRINT_WIDTH)
logging.info('Generating timeseries...')
generate_timeseries(districts=False)
logging.info('Done!')
logging.info('-' * PRINT_WIDTH)
logging.info('Adding state and district metadata...')
f = STATE_WISE
with open(f, 'r') as f:
logging.info('File: {}'.format(STATE_WISE.name))
raw_data = json.load(f, object_pairs_hook=OrderedDict)
add_state_meta(raw_data)
f = DISTRICT_WISE
with open(f, 'r') as f:
logging.info('File: {}'.format(DISTRICT_WISE.name))
raw_data = json.load(f, object_pairs_hook=OrderedDict)
add_district_meta(raw_data)
logging.info('Done!')
logging.info('-' * PRINT_WIDTH)
logging.info('Dumping APIs...')
OUTPUT_MIN_DIR.mkdir(parents=True, exist_ok=True)
# Dump prettified full data json
fn = '{}-{}'.format(OUTPUT_DATA_PREFIX, 'all')
with open((OUTPUT_DIR / fn).with_suffix('.json'), 'w') as f:
json.dump(data, f, indent=2, sort_keys=True)
# Dump minified full data
with open((OUTPUT_MIN_DIR / fn).with_suffix('.min.json'), 'w') as f:
| python | MIT | 3303854897685cfeaefed458748771b6ea63bf5c | 2026-01-05T04:52:08.585516Z | true |
covid19india/api | https://github.com/covid19india/api/blob/3303854897685cfeaefed458748771b6ea63bf5c/src/parser_v4.py | src/parser_v4.py | #!/usr/bin/env python3
import csv
import logging
import json
import sys
import yaml
from collections import defaultdict, OrderedDict
from datetime import datetime, timedelta
from pathlib import Path
# Set logging level
logging.basicConfig(stream=sys.stdout,
                    format="%(message)s",
                    level=logging.INFO)
# Current date in India (IST = UTC+05:30)
INDIA_DATE = datetime.strftime(
    datetime.utcnow() + timedelta(hours=5, minutes=30), "%Y-%m-%d")
INDIA_UTC_OFFSET = "+05:30"
# Arbitrary minimum date; anything earlier is treated as bad data
MIN_DATE = "2020-01-01"
# Input/Output root directory
ROOT_DIR = Path("tmp")
CSV_DIR = ROOT_DIR / "csv" / "latest"
# State codes to be used as API keys
META_DATA = ROOT_DIR / "misc.json"
# Geographical districts of India
DISTRICT_LIST = ROOT_DIR / "state_district_wise.json"
# All raw_data's (template; {n} is the sheet number)
RAW_DATA = "raw_data{n}.json"
# Deaths and recoveries for entries in raw_data1 and raw_data2
OUTCOME_DATA = "deaths_recoveries{n}.json"
# District data as of 26th April
DISTRICT_DATA_GOSPEL = CSV_DIR / "districts_26apr_gospel.csv"
GOSPEL_DATE = "2020-04-26"
# India testing data
ICMR_TEST_DATA = ROOT_DIR / "data.json"
STATE_TEST_DATA = CSV_DIR / "statewise_tested_numbers_data.csv"
DISTRICT_TEST_DATA = CSV_DIR / "district_testing.csv"
STATE_VACCINATION_DATA = CSV_DIR / "vaccine_doses_statewise_v2.csv"
DISTRICT_VACCINATION_DATA = CSV_DIR / "cowin_vaccine_data_districtwise.csv"
## For adding metadata
# For state notes and last updated
STATE_WISE = ROOT_DIR / "data.json"
# For district notes
DISTRICT_WISE = ROOT_DIR / "state_district_wise.json"
# API outputs
OUTPUT_DIR = ROOT_DIR / "v4"
OUTPUT_MIN_DIR = OUTPUT_DIR / "min"
OUTPUT_DATA_PREFIX = "data"
OUTPUT_TIMESERIES_PREFIX = "timeseries"
# CSV Outputs
OUTPUT_STATES_CSV = CSV_DIR / "states.csv"
OUTPUT_DISTRICTS_CSV = CSV_DIR / "districts.csv"
# Two digit state codes (lowercased state name -> code; filled by parsers)
STATE_CODES = {}
# State codes to state names map (capitalized appropriately)
STATE_NAMES = {}
# State/district populations (filled by parsers)
STATE_POPULATIONS = {}
DISTRICT_POPULATIONS = defaultdict(dict)
# Code corresponding to MoHFW's 'Unassigned States' in sheet
UNASSIGNED_STATE_CODE = "UN"
# Dict containing geographical districts
DISTRICTS_DICT = defaultdict(dict)
# District key to give to unknown district values in raw_data
UNKNOWN_DISTRICT_KEY = "Unknown"
# States with single district/no district-wise data
SINGLE_DISTRICT_STATES = ["CH", "DL", "LD"]
NO_DISTRICT_DATA_STATES = ["AN", "AS", "GA", "MN", "SK", "TG"]
# Three most important statistics
PRIMARY_STATISTICS = ["confirmed", "recovered", "deceased"]
# Raw data key => Statistics
RAW_DATA_MAP = {
    "hospitalized": "confirmed",
    "recovered": "recovered",
    "deceased": "deceased",
    "migrated_other": "other",
}
# ICMR statistic -> sheet column key and source column
ICMR_DATA_DICT = {
    "tested": {
        "key": "totalsamplestested",
        "source": "source"
    },
    "vaccinated1": {
        "key": "firstdoseadministered",
        "source": "source4"
    },
    "vaccinated2": {
        "key": "seconddoseadministered",
        "source": "source4"
    },
}
# Vaccination statistic -> sheet column header
VACCINATION_DATA_DICT = {
    "vaccinated1": "First Dose Administered",
    "vaccinated2": "Second Dose Administered",
}
ALL_STATISTICS = [*RAW_DATA_MAP.values(), *ICMR_DATA_DICT.keys()]
# CSV Headers
CSV_STATISTIC_HEADERS = {
    "confirmed": "Confirmed",
    "recovered": "Recovered",
    "deceased": "Deceased",
    "other": "Other",
    "tested": "Tested",
}
STATE_CSV_HEADER = ["Date", "State", *CSV_STATISTIC_HEADERS.values()]
DISTRICT_CSV_HEADER = [
    "Date", "State", "District", *CSV_STATISTIC_HEADERS.values()
]
# Skip warning for these states
VACCINATION_SKIP_STATES = {"total", "miscellaneous"}
# Categories to keep in timeseries API
TIMESERIES_TYPES = ["total", "delta", "delta7"]
# Log statements width
PRINT_WIDTH = 70
# Nested default dict of dict (arbitrarily deep autovivifying mapping)
ddict = lambda: defaultdict(ddict)
# Dictionaries which store the final parsed data
data = ddict()
timeseries = ddict()
def parse_state_metadata(raw_data):
  """Populate STATE_CODES, STATE_NAMES and STATE_POPULATIONS from misc.json."""
  for row_num, row in enumerate(raw_data["state_meta_data"], start=2):
    # State name keeps sheet capitalization; code is upper-cased
    name = row["stateut"].strip()
    code = row["abbreviation"].strip().upper()
    STATE_CODES[name.lower()] = code
    STATE_NAMES[code] = name
    # Population may be blank (skipped silently) or malformed (warned)
    try:
      STATE_POPULATIONS[code] = int(row["population"].strip())
    except ValueError:
      if row["population"]:
        logging.warning(
            f"[L{row_num}] [Bad population: {row['population']}] {code}")
def parse_district_list(raw_data):
  """Build DISTRICTS_DICT: state code -> {lowercased name: canonical name}."""
  # Single-district states use the state name itself as their district
  for code in SINGLE_DISTRICT_STATES:
    name = STATE_NAMES[code]
    DISTRICTS_DICT[code][name.lower()] = name
  # Remaining districts come from state_district_wise.json
  for line_no, entry in enumerate(raw_data.values(), start=2):
    code = entry["statecode"].strip().upper()
    if code not in STATE_CODES.values():
      logging.warning(f"[L{line_no}] Bad state: {entry['statecode']}")
    elif "districtData" in entry:
      for raw_name in entry["districtData"]:
        name = raw_name.strip()
        DISTRICTS_DICT[code][name.lower()] = name
def parse_district(district, state, single_district=True, allow_unknown=True):
  """Canonicalize a raw district name for a state.

  Returns (district, expected). `expected` is False only when the name is
  non-empty yet absent from the known district list; callers log those.
  """
  name = district.strip()
  # Single-district states map every district to the state name itself
  if single_district and state in SINGLE_DISTRICT_STATES:
    return STATE_NAMES[state], True
  # States without district-wise data collapse into the Unknown bucket
  if allow_unknown and state in NO_DISTRICT_DATA_STATES:
    return UNKNOWN_DISTRICT_KEY, True
  if not name or name.lower() == "unknown":
    return UNKNOWN_DISTRICT_KEY, True
  known = DISTRICTS_DICT[state]
  if name.lower() in known:
    return known[name.lower()], True
  return name, False
def parse_district_metadata(raw_data):
  """Populate DISTRICT_POPULATIONS from misc.json's district_meta_data."""
  for i, entry in enumerate(raw_data["district_meta_data"]):
    # State code
    state = entry["statecode"].strip().upper()
    if state not in STATE_CODES.values():
      logging.warning(f"[L{i + 2}] Bad state: {state}")
      continue
    # District name with sheet capitalization; no single-district collapse
    # or Unknown fallback here, since this is reference metadata
    district, expected = parse_district(entry["district"],
                                        state,
                                        single_district=False,
                                        allow_unknown=False)
    if not expected:
      logging.warning(f"[L{i + 2}] [{state}] Unexpected district: {district}")
    # District population; blank cells are skipped silently
    try:
      population = int(entry["population"].strip())
    except ValueError:
      if entry["population"]:
        logging.warning(
            f"[L{i+2}] [Bad population: {entry['population']}] {state}: {district}"
        )
      continue
    DISTRICT_POPULATIONS[state][district] = population
def inc(ref, key, count):
  """Add `count` to ref[key], treating any non-int current value as 0."""
  current = ref[key]
  if not isinstance(current, int):
    current = 0
  ref[key] = current + count
def parse(raw_data, i):
  """Parse raw_data{i}.json case entries into daily deltas in `data`.

  Each entry contributes `numcases` to the matching statistic for the
  national total (TT), the state, and (when reliable) the district.
  For sheets 1 and 2 (i < 3) every row is a confirmed case; later sheets
  carry an explicit 'currentstatus'. Malformed rows are logged and skipped.
  """
  for j, entry in enumerate(raw_data["raw_data"]):
    count_str = entry["numcases"].strip()
    if not count_str:
      continue
    state_name = entry["detectedstate"].strip().lower()
    try:
      state = STATE_CODES[state_name]
    except KeyError:
      # Entries with empty state names are discarded
      if state_name:
        # Unrecognized state entries are discarded and logged
        logging.warning(
            f"[L{j+2}] [{entry['dateannounced']}] [Bad state: {entry['detectedstate']}] {entry['numcases']}"
        )
      continue
    try:
      fdate = datetime.strptime(entry["dateannounced"].strip(), "%d/%m/%Y")
      date = datetime.strftime(fdate, "%Y-%m-%d")
      if date < MIN_DATE or date > INDIA_DATE:
        # Entries from future dates will be ignored
        logging.warning(
            f"[L{j+2}] [Future/past date: {entry['dateannounced']}] {entry['detectedstate']}: {entry['detecteddistrict']} {entry['numcases']}"
        )
        continue
    except ValueError:
      # Bad date
      logging.warning(
          f"[L{j+2}] [Bad date: {entry['dateannounced']}] {entry['detectedstate']}: {entry['detecteddistrict']} {entry['numcases']}"
      )
      continue
    district, expected = parse_district(entry["detecteddistrict"], state)
    if not expected:
      # Print unexpected district names
      logging.warning(
          f"[L{j+2}] [{entry['dateannounced']}] [Unexpected district: {district} ({state})] {entry['numcases']}"
      )
    try:
      count = int(count_str)
    except ValueError:
      logging.warning(
          f"[L{j+2}] [{entry['dateannounced']}] [Bad numcases: {entry['numcases']}] {state}: {district}"
      )
      continue
    if count:
      try:
        # All rows in v1 and v2 are confirmed cases
        statistic = ("confirmed" if i < 3 else
                     RAW_DATA_MAP[entry["currentstatus"].strip().lower()])
        inc(data[date]["TT"]["delta"], statistic, count)
        inc(data[date][state]["delta"], statistic, count)
        # Don't parse old district data since it's unreliable
        if (state in SINGLE_DISTRICT_STATES or state in NO_DISTRICT_DATA_STATES
            or
            (i > 2 and date > GOSPEL_DATE and state != UNASSIGNED_STATE_CODE)):
          inc(
              data[date][state]["districts"][district]["delta"],
              statistic,
              count,
          )
      except KeyError:
        # Unrecognized status
        logging.warning(
            f"[L{j+2}] [{entry['dateannounced']}] [Bad currentstatus: {entry['currentstatus']}] {state}: {district} {entry['numcases']}"
        )
def parse_outcome(outcome_data, i):
  """Parse deaths_recoveries{i}.json (outcomes missing from raw_data 1/2).

  Each entry is a single patient outcome: it increments the matching
  statistic delta by 1 for the national total (TT), the state, and —
  for single-district/no-district-data states — the district.
  Malformed rows are logged and skipped.
  """
  for j, entry in enumerate(outcome_data["deaths_recoveries"]):
    state_name = entry["state"].strip().lower()
    try:
      state = STATE_CODES[state_name]
    except KeyError:
      # Entries with empty state names are discarded
      if state_name:
        # Unrecognized state entries are discarded and logged
        logging.warning(
            f"[L{j + 2}] [{entry['date']}] [Bad state: {entry['state']}]")
      continue
    try:
      fdate = datetime.strptime(entry["date"].strip(), "%d/%m/%Y")
      date = datetime.strftime(fdate, "%Y-%m-%d")
      if date < MIN_DATE or date > INDIA_DATE:
        # Entries from future dates will be ignored
        logging.warning(
            f"[L{j + 2}] [Future/past date: {entry['date']}] {state}")
        continue
    except ValueError:
      # Bad date
      logging.warning(f"[L{j + 2}] [Bad date: {entry['date']}] {state}")
      continue
    district, expected = parse_district(entry["district"], state)
    if not expected:
      # Print unexpected district names
      # FIX: unlike raw_data entries, outcome entries carry no 'numcases'
      # key (nothing else in this function reads it), so the previous
      # entry['numcases'] lookup raised KeyError here; use .get() instead
      logging.warning(
          f"[L{j+2}] [{entry['date']}] [Unexpected district: {district} ({state})] {entry.get('numcases', '')}"
      )
    try:
      # Each outcome row counts as exactly one case
      statistic = RAW_DATA_MAP[entry["patientstatus"].strip().lower()]
      inc(data[date]["TT"]["delta"], statistic, 1)
      inc(data[date][state]["delta"], statistic, 1)
      if state in SINGLE_DISTRICT_STATES or state in NO_DISTRICT_DATA_STATES:
        inc(data[date][state]["districts"][district]["delta"], statistic, 1)
      ## Don't parse old district data since it's unreliable
      # inc(data[date][state]['districts'][district]['delta'], statistic,
      #    1)
    except KeyError:
      # Unrecognized status
      logging.warning(
          f"[L{j+2}] [{entry['date']}] [Bad patientstatus: {entry['patientstatus']}] {state}: {district}"
      )
def parse_district_gospel(reader):
  """Load the 26th April district snapshot (csv.DictReader) into `data`.

  Values are stored as cumulative totals under GOSPEL_DATE. States with
  single-district or no district-wise data are skipped.
  """
  for i, row in enumerate(reader):
    state = row["State_Code"].strip().upper()
    if (state not in STATE_CODES.values() or state in SINGLE_DISTRICT_STATES
        or state in NO_DISTRICT_DATA_STATES):
      if state not in STATE_CODES.values():
        logging.warning(f"[{i + 2}] Bad state: {state}")
      continue
    district, expected = parse_district(row["District"], state)
    if not expected:
      # Print unexpected district names
      logging.warning(f"[{i + 2}] Unexpected district: {state} {district}")
    for statistic in PRIMARY_STATISTICS:
      # Blank cells count as zero and are not stored
      count = int(row[statistic.capitalize()] or 0)
      if count:
        data[GOSPEL_DATE][state]["districts"][district]["total"][
            statistic] = count
def parse_icmr(icmr_data):
  """Parse national ICMR testing/vaccination totals into data[...]['TT'].

  For each entry in icmr_data['tested'], stores every statistic listed in
  ICMR_DATA_DICT as a cumulative total plus source/date metadata. Both
  vaccination doses share the single 'vaccinated' meta key.
  """
  for j, entry in enumerate(icmr_data["tested"]):
    for statistic, statistic_dict in ICMR_DATA_DICT.items():
      key = statistic_dict["key"]
      count_str = entry[key].strip()
      if not count_str:
        continue
      try:
        fdate = datetime.strptime(entry["testedasof"].strip(), "%d/%m/%Y")
        date = datetime.strftime(fdate, "%Y-%m-%d")
        if date < MIN_DATE or date > INDIA_DATE:
          # Entries from future dates will be ignored and logged
          logging.warning(
              f"[L{j + 2}] [Future/past date: {entry['testedasof']}]")
          continue
      except ValueError:
        # Bad timestamp
        logging.warning(f"[L{j + 2}] [Bad date: {entry['testedasof']}]")
        continue
      try:
        count = int(count_str)
      except ValueError:
        logging.warning(
            f"[L{j + 2}] [{entry['testedasof']}] [Bad {key}: {entry[key]}]")
        continue
      if count:
        data[date]["TT"]["total"][statistic] = count
        # Add source/last updated
        meta_key = ("vaccinated" if statistic
                    in {"vaccinated1", "vaccinated2"} else statistic)
        data[date]["TT"]["meta"][meta_key]["source"] = entry[
            statistic_dict["source"]].strip()
        data[date]["TT"]["meta"][meta_key]["date"] = date
def parse_state_test(reader):
  """Parse state testing rows (csv.DictReader) into cumulative totals.

  Stores 'Total Tested' plus Source1/date metadata per state and date.
  Single-district states also get a mirrored district-level entry.
  Malformed rows are logged and skipped.
  """
  for j, entry in enumerate(reader):
    count_str = entry["Total Tested"].strip()
    if not count_str:
      continue
    try:
      fdate = datetime.strptime(entry["Updated On"].strip(), "%d/%m/%Y")
      date = datetime.strftime(fdate, "%Y-%m-%d")
      if date < MIN_DATE or date > INDIA_DATE:
        # Entries from future dates will be ignored and logged
        logging.warning(
            f"[L{j+2}] [Future/past date: {entry['Updated On']}] {entry['State']}"
        )
        continue
    except ValueError:
      # Bad date
      logging.warning(
          f"[L{j + 2}] [Bad date: {entry['Updated On']}] {entry['State']}")
      continue
    state_name = entry["State"].strip().lower()
    try:
      state = STATE_CODES[state_name]
    except KeyError:
      # Entries having unrecognized state names are discarded
      logging.warning(
          f"[L{j+2}] [{entry['Updated On']}] [Bad state: {entry['State']}]")
      continue
    try:
      count = int(count_str)
    except ValueError:
      logging.warning(
          f"[L{j+2}] [{entry['Updated On']}] [Bad total tested: {entry['Total Tested']}] {entry['State']}"
      )
      continue
    source = entry["Source1"].strip()
    if count:
      data[date][state]["total"]["tested"] = count
      data[date][state]["meta"]["tested"]["source"] = source
      data[date][state]["meta"]["tested"]["date"] = date
      # Add district entry too for single-district states
      if state in SINGLE_DISTRICT_STATES:
        # District/State name
        district = STATE_NAMES[state]
        data[date][state]["districts"][district]["total"]["tested"] = count
        data[date][state]["districts"][district]["meta"]["tested"][
            "source"] = source
        data[date][state]["districts"][district]["meta"]["tested"][
            "date"] = date
def column_str(n):
  """Return spreadsheet column letters for 1-based index n (1 -> A, 27 -> AA)."""
  parts = []
  while n > 0:
    n, rem = divmod(n - 1, 26)
    parts.append(chr(65 + rem))
  return "".join(parts[::-1])
def parse_pivot_headers(header1, header2):
  """Parse the two header rows of a pivot-format sheet.

  Returns (row_keys, column_keys, dates):
  - row_keys: lowercased metadata column name -> column index,
  - column_keys: lowercased per-date sub-column name -> offset within a
    date group,
  - dates: one 'YYYY-MM-DD' string (or None if bad/out-of-range) per date
    group in header1.

  NOTE(review): assumes header1 contains at least one parseable date —
  otherwise `fdate`/`j` are unbound at the while loop; confirm inputs.
  """
  # Parse till the first date
  row_keys = {}
  for j, column in enumerate(header1):
    try:
      fdate = datetime.strptime(column.strip(), "%d/%m/%Y")
      break
    except ValueError:
      row_keys[column.lower()] = j
  # Parse headers in each date
  column_keys = {}
  while j < len(header1) and fdate == datetime.strptime(
      header1[j].strip(), "%d/%m/%Y"):
    column_keys[header2[j].strip().lower()] = j - len(row_keys)
    j += 1
  # Parse dates
  dates = []
  for j in range(len(row_keys), len(header1), len(column_keys)):
    dates.append(None)
    try:
      fdate = datetime.strptime(header1[j].strip(), "%d/%m/%Y")
      date = datetime.strftime(fdate, "%Y-%m-%d")
      if date < MIN_DATE or date > INDIA_DATE:
        # Entries from future dates will be ignored
        logging.warning(
            f"[{column_str(j + 1)}] Future/past date: {header1[j]}")
        continue
      dates[-1] = date
    except ValueError:
      # Bad date
      logging.warning(f"[{column_str(j + 1)}] Bad date: {header1[j]}")
  return row_keys, column_keys, dates
def parse_district_test(reader):
  """Parse the district testing pivot sheet (csv.reader) into `data`.

  Uses parse_pivot_headers to locate the state/district columns and the
  per-date 'tested'/'source1' sub-columns. Single-district states are
  skipped (already filled by parse_state_test). Only the tested count
  and its date are stored; the source line is currently disabled.
  """
  header1 = next(reader)
  header2 = next(reader)
  row_keys, column_keys, dates = parse_pivot_headers(header1, header2)
  for i, row in enumerate(reader):
    state_name = row[row_keys["state"]].strip().lower()
    try:
      state = STATE_CODES[state_name]
    except KeyError:
      # Entries having unrecognized state names are discarded
      logging.warning(f"[L{i + 3}] Bad state: {row[row_keys['state']]}")
      continue
    if state in SINGLE_DISTRICT_STATES:
      # Skip since value is already added while parsing state data
      continue
    district, expected = parse_district(row[row_keys["district"]],
                                        state,
                                        allow_unknown=False)
    if not expected:
      # Print unexpected district names
      logging.warning(f"[L{i + 3}] Unexpected district: {state} {district}")
    for j1, j2 in enumerate(range(len(row_keys), len(row), len(column_keys))):
      # Date from header
      date = dates[j1]
      if not date:
        continue
      # Tested count
      count_str = row[j2 + column_keys["tested"]].strip()
      try:
        count = int(count_str)
      except ValueError:
        # Warn only when the cell is non-empty; empty cells are expected
        if count_str:
          logging.warning(
              f"[L{i + 3} {column_str(j2 + column_keys['tested'] + 1)}] [{state}: {district}] Bad Tested: {row[j2 + column_keys['tested']]}"
          )
        continue
      # Use Source1 key as source
      # (kept for the commented-out meta 'source' assignment below)
      source = row[j2 + column_keys["source1"]].strip()
      if count:
        data[date][state]["districts"][district]["total"]["tested"] = count
        # data[date][state]['districts'][district]['meta']['tested'][
        #     'source'] = source
        data[date][state]["districts"][district]["meta"]["tested"][
            "date"] = date
def parse_state_vaccination(reader):
    """Parse the state-wise vaccination sheet into the global `data` dict.

    Each row carries a "Vaccinated As of" date plus one column per
    vaccination statistic (mapping in VACCINATION_DATA_DICT).  For states
    with a single district, the same count is mirrored into the district
    entry so district views stay complete.
    """
    for j, entry in enumerate(reader):
        for statistic, key in VACCINATION_DATA_DICT.items():
            count_str = entry[key].strip()
            if not count_str:
                # Blank cell: nothing to record for this statistic
                continue
            try:
                fdate = datetime.strptime(entry["Vaccinated As of"].strip(),
                                          "%d/%m/%Y")
                date = datetime.strftime(fdate, "%Y-%m-%d")
                if date < MIN_DATE or date > INDIA_DATE:
                    # Entries from future dates will be ignored and logged
                    logging.warning(
                        f"[L{j+2}] [Future/past date: {entry['Vaccinated As of']}] {entry['State']}"
                    )
                    continue
            except ValueError:
                # Bad date
                logging.warning(
                    f"[L{j + 2}] [Bad date: {entry['Vaccinated As of']}] {entry['State']}"
                )
                continue
            state_name = entry["State"].strip().lower()
            if state_name in VACCINATION_SKIP_STATES:
                continue
            try:
                state = STATE_CODES[state_name]
            except KeyError:
                # Entries having unrecognized state names are discarded
                logging.warning(
                    f"[L{j+2}] [{entry['Vaccinated As of']}] [Bad state: {entry['State']}]"
                )
                continue
            try:
                count = int(count_str)
            except ValueError:
                logging.warning(
                    f"[L{j+2}] [{entry['Vaccinated As of']}] [Bad {key}: {entry[key]}] {entry['State']}"
                )
                continue
            if count:
                data[date][state]["total"][statistic] = count
                # data[date][state]["meta"]["vaccinated"]["source"] = source
                data[date][state]["meta"]["vaccinated"]["date"] = date
                # Add district entry too for single-district states
                if state in SINGLE_DISTRICT_STATES:
                    # District/State name
                    district = STATE_NAMES[state]
                    data[date][state]["districts"][district]["total"][statistic] = count
                    # data[date][state]["districts"][district]["meta"]["vaccinated"][
                    #     "source"] = source
                    data[date][state]["districts"][district]["meta"]["vaccinated"][
                        "date"] = date
def parse_district_vaccination(reader):
    """Parse the district-wise vaccination sheet into the global `data` dict.

    Pivot layout: one row per district with a repeating group of
    vaccination columns per date.  Counts are added via `inc` (accumulated,
    not overwritten).  Single-district states are skipped because they were
    already filled from the state-level sheet.
    """
    header1 = next(reader)
    header2 = next(reader)
    row_keys, column_keys, dates = parse_pivot_headers(header1, header2)
    for i, row in enumerate(reader):
        state = row[row_keys["state_code"]].strip().upper()
        if state not in STATE_CODES.values():
            # Entries having unrecognized state codes are discarded
            logging.warning(f"[L{i + 3}] Bad state: {row[row_keys['state_code']]}")
            continue
        if state in SINGLE_DISTRICT_STATES:
            # Skip since value is already added while parsing state data
            continue
        district, expected = parse_district(row[row_keys["district"]],
                                            state,
                                            allow_unknown=False)
        if not expected:
            # Print unexpected district names
            logging.warning(f"[L{i + 3}] Unexpected district: {state} {district}")
        # Walk the repeating per-date column groups
        for j1, j2 in enumerate(range(len(row_keys), len(row), len(column_keys))):
            # Date from header
            date = dates[j1]
            if not date:
                # Header date was bad or out of range; skip this column group
                continue
            for statistic in VACCINATION_DATA_DICT:
                key = VACCINATION_DATA_DICT[statistic].lower()
                count_str = row[j2 + column_keys[key]].strip()
                try:
                    count = int(count_str)
                except ValueError:
                    # Only warn for non-empty, non-numeric cells
                    if count_str:
                        logging.warning(
                            f"[L{i + 3} {column_str(j2 + column_keys[key] + 1)}] [{state}: {district}] Bad {key}: {row[j2 + column_keys[key]]}"
                        )
                    continue
                if count:
                    inc(
                        data[date][state]["districts"][district]["total"],
                        statistic,
                        count,
                    )
def contains(raw_data, keys):
    """Return True if the nested key path *keys* exists inside *raw_data*."""
    node = raw_data
    for step in keys:
        if step not in node:
            return False
        node = node[step]
    return True
def fill_deltas():
    """Derive per-day deltas for ICMR statistics from cumulative totals.

    Pass 1: each date's delta starts as a copy of that date's cumulative.
    Pass 2: the previous date's cumulative is subtracted.  When a date is
    missing a cumulative value entirely, yesterday's cumulative (and its
    meta source/date) is carried forward so the series stays continuous.
    Operates in place on the module-level `data` structure.
    """
    dates = sorted(data)
    for i, date in enumerate(dates):
        curr_data = data[date]
        # Initialize today's delta with today's cumulative
        for state, state_data in curr_data.items():
            for key in ICMR_DATA_DICT:
                if contains(state_data, ["total", key]):
                    state_data["delta"][key] = state_data["total"][key]
            if "districts" not in state_data:
                continue
            for district, district_data in state_data["districts"].items():
                for key in ICMR_DATA_DICT:
                    if contains(district_data, ["total", key]):
                        district_data["delta"][key] = district_data["total"][key]
        if i > 0:
            prev_date = dates[i - 1]
            prev_data = data[prev_date]
            for state, state_data in prev_data.items():
                for key in ICMR_DATA_DICT:
                    if contains(state_data, ["total", key]):
                        if key in curr_data[state]["total"]:
                            # Subtract previous cumulative to get delta
                            curr_data[state]["delta"][key] -= state_data["total"][key]
                        else:
                            # Take today's cumulative to be same as yesterday's
                            # cumulative if today's cumulative is missing
                            curr_data[state]["total"][key] = state_data["total"][key]
                            curr_data[state]["meta"][key]["source"] = state_data["meta"][
                                key]["source"]
                            curr_data[state]["meta"][key]["date"] = state_data["meta"][key][
                                "date"]
                if "districts" not in state_data:
                    continue
                for district, district_data in state_data["districts"].items():
                    for key in ICMR_DATA_DICT:
                        if contains(district_data, ["total", key]):
                            if key in curr_data[state]["districts"][district]["total"]:
                                # Subtract previous cumulative to get delta
                                curr_data[state]["districts"][district]["delta"][
                                    key] -= district_data["total"][key]
                            else:
                                # Take today's cumulative to be same as yesterday's
                                # cumulative if today's cumulative is missing
                                curr_data[state]["districts"][district]["total"][
                                    key] = district_data["total"][key]
                                curr_data[state]["districts"][district]["meta"][key][
                                    "source"] = district_data["meta"][key]["source"]
                                curr_data[state]["districts"][district]["meta"][key][
                                    "date"] = district_data["meta"][key]["date"]
def accumulate(start_after_date=MIN_DATE, end_date="3020-01-30"):
    """Turn daily deltas into running cumulative totals in `data`.

    For every date in (start_after_date, end_date]: first copy the previous
    date's cumulative totals forward, then add today's deltas on top.
    States that only have pre-gospel district data (or none at all) skip
    the district-level accumulation because those districts were already
    cumulative.  `end_date` defaults to a far-future sentinel ("3020-...")
    so the whole series is processed.
    """
    # Cumulate daily delta values into total
    dates = sorted(data)
    for i, date in enumerate(dates):
        if date <= start_after_date:
            continue
        elif date > end_date:
            break
        curr_data = data[date]
        if i > 0:
            # Initialize today's cumulative with previous available
            prev_date = dates[i - 1]
            prev_data = data[prev_date]
            for state, state_data in prev_data.items():
                for statistic in RAW_DATA_MAP.values():
                    if statistic in state_data["total"]:
                        inc(
                            curr_data[state]["total"],
                            statistic,
                            state_data["total"][statistic],
                        )
                if (state not in SINGLE_DISTRICT_STATES
                        and state not in NO_DISTRICT_DATA_STATES
                        and ("districts" not in state_data or date <= GOSPEL_DATE)):
                    # Old district data is already accumulated
                    continue
                for district, district_data in state_data["districts"].items():
                    for statistic in RAW_DATA_MAP.values():
                        if statistic in district_data["total"]:
                            inc(
                                curr_data[state]["districts"][district]["total"],
                                statistic,
                                district_data["total"][statistic],
                            )
        # Add today's dailys to today's cumulative
        for state, state_data in curr_data.items():
            if "delta" in state_data:
                for statistic in RAW_DATA_MAP.values():
                    if statistic in state_data["delta"]:
                        inc(
                            state_data["total"],
                            statistic,
                            state_data["delta"][statistic],
                        )
            if (state not in SINGLE_DISTRICT_STATES
                    and state not in NO_DISTRICT_DATA_STATES
                    and ("districts" not in state_data or date <= GOSPEL_DATE)):
                # Old district data is already accumulated
                continue
            for district, district_data in state_data["districts"].items():
                if "delta" in district_data:
                    for statistic in RAW_DATA_MAP.values():
                        if statistic in district_data["delta"]:
                            inc(
                                district_data["total"],
                                statistic,
                                district_data["delta"][statistic],
                            )
def fill_gospel_unknown():
    """Back-fill an "Unknown" district on the gospel date.

    The gospel snapshot has no Unknown-district rows, so for each primary
    statistic where the state total differs from the sum of its district
    totals, the difference is assigned to UNKNOWN_DISTRICT_KEY.
    """
    # Gospel doesn't contain unknowns
    # Fill them based on gospel date state counts
    curr_data = data[GOSPEL_DATE]
    for state, state_data in curr_data.items():
        if "districts" not in state_data or "total" not in state_data:
            # State had no cases yet
            continue
        # Sum each primary statistic across all districts of the state
        sum_district_totals = defaultdict(lambda: 0)
        for district, district_data in state_data["districts"].items():
            if "total" in district_data:
                for statistic, count in district_data["total"].items():
                    if statistic in PRIMARY_STATISTICS:
                        sum_district_totals[statistic] += count
        for statistic in PRIMARY_STATISTICS:
            if statistic in state_data["total"]:
                count = state_data["total"][statistic]
                if count != sum_district_totals[statistic]:
                    # Counts don't match
                    # We take Unknown district values = State - Sum(districts gospel)
                    state_data["districts"][UNKNOWN_DISTRICT_KEY]["total"][statistic] = (
                        count - sum_district_totals[statistic])
def accumulate_days(num_days, offset=0, statistics=ALL_STATISTICS):
    """Compute rolling-window sums of daily deltas.

    For each date, sums the deltas of the previous `num_days` days
    (optionally skipping the most recent `offset` days) into the key
    "delta{num_days}" (or "delta{num_days}_{offset}" when offset > 0).
    NOTE(review): `statistics` default is a shared module-level object —
    fine as long as callers never mutate it; confirm ALL_STATISTICS is
    treated as read-only.
    """
    # Cumulate num_day delta values
    for date in data:
        curr_data = data[date]
        fdate = datetime.strptime(date, "%Y-%m-%d")
        # The window of source dates ending at (date - offset)
        dates = [
            datetime.strftime(fdate - timedelta(days=x), "%Y-%m-%d")
            for x in range(offset, num_days)
        ]
        key = f"delta{num_days}"
        if offset > 0:
            key = f"{key}_{offset}"
        for prev_date in dates:
            if prev_date in data:
                prev_data = data[prev_date]
                for state, state_data in prev_data.items():
                    if "delta" in state_data:
                        for statistic in statistics:
                            if statistic in state_data["delta"]:
                                inc(
                                    curr_data[state][key],
                                    statistic,
                                    state_data["delta"][statistic],
                                )
                    if (state not in SINGLE_DISTRICT_STATES
                            and state not in NO_DISTRICT_DATA_STATES
                            and ("districts" not in state_data or date <= GOSPEL_DATE)):
                        # Old district data is already accumulated
                        continue
                    for district, district_data in state_data["districts"].items():
                        if "delta" in district_data:
                            for statistic in statistics:
                                if statistic in district_data["delta"]:
                                    inc(
                                        curr_data[state]["districts"][district][key],
                                        statistic,
                                        district_data["delta"][statistic],
                                    )
def stripper(raw_data, dtype=ddict):
    """Recursively copy *raw_data*, omitting keys whose values are empty.

    Nested dicts are rebuilt as *dtype*; falsy leaves and subtrees that
    become empty after pruning are dropped from the copy.
    """
    cleaned = dtype()
    for key, value in raw_data.items():
        pruned = stripper(value, dtype) if isinstance(value, dict) else value
        if pruned:
            cleaned[key] = pruned
    return cleaned
def add_populations():
    """Attach population figures to every state and district entry in `data`.

    Entries with no matching population in STATE_POPULATIONS /
    DISTRICT_POPULATIONS are simply left without a "population" meta field.
    """
    for day_data in data.values():
        for state_code, state_entry in day_data.items():
            try:
                state_entry["meta"]["population"] = STATE_POPULATIONS[state_code]
            except KeyError:
                # No population figure for this state code
                pass
            if "districts" not in state_entry:
                continue
            for district_name, district_entry in state_entry["districts"].items():
                try:
                    district_entry["meta"]["population"] = (
                        DISTRICT_POPULATIONS[state_code][district_name])
                except KeyError:
                    # No population figure for this district
                    pass
def trim_timeseries():
    """Drop trailing timeseries dates newer than the last date with a delta.

    For each state (and each of its districts) the newest date carrying a
    "delta" entry is found; every date after it is deleted.
    NOTE(review): `last_date` is only assigned inside the break-search loop —
    if an entry has no date with "delta" at all it either raises NameError
    (first iteration) or silently reuses the previous entry's `last_date`;
    confirm upstream guarantees every "dates" map has at least one delta.
    """
    for state_data in timeseries.values():
        if "dates" in state_data:
            dates = list(state_data["dates"])
            # Newest date that has a delta becomes the cutoff
            for date in sorted(dates, reverse=True):
                if "delta" in state_data["dates"][date]:
                    last_date = date
                    break
            for date in dates:
                if date > last_date:
                    del state_data["dates"][date]
        if "districts" in state_data:
            for district_data in state_data["districts"].values():
                if "dates" in district_data:
                    dates = list(district_data["dates"])
                    for date in sorted(dates, reverse=True):
                        if "delta" in district_data["dates"][date]:
                            last_date = date
                            break
                    for date in dates:
                        if date > last_date:
                            del district_data["dates"][date]
def generate_timeseries(districts=False):
    """Re-pivot `data` (date -> state) into `timeseries` (state -> date).

    Copies every TIMESERIES_TYPES bucket (total/delta/...) per date.
    District series are included only when `districts` is True, and only
    from the gospel date onward.  Ends by trimming trailing delta-less
    dates via trim_timeseries().
    NOTE(review): this uses `date < GOSPEL_DATE` while other functions use
    `<=` for the same cutoff — confirm the gospel date itself is meant to
    be included here.
    """
    for date in sorted(data):
        curr_data = data[date]
        for state, state_data in curr_data.items():
            for stype in TIMESERIES_TYPES:
                if stype in state_data:
                    for statistic, value in state_data[stype].items():
                        timeseries[state]["dates"][date][stype][statistic] = value
            if not districts or "districts" not in state_data or date < GOSPEL_DATE:
                # Total state has no district data
                # District timeseries starts only from 26th April
                continue
            for district, district_data in state_data["districts"].items():
                for stype in TIMESERIES_TYPES:
                    if stype in district_data:
                        for statistic, value in district_data[stype].items():
                            timeseries[state]["districts"][district]["dates"][date][stype][
                                statistic] = value
    trim_timeseries()
def add_state_meta(raw_data):
last_date = sorted(data)[-1]
last_data = data[last_date]
for j, entry in enumerate(raw_data["statewise"]):
state = entry["statecode"].strip().upper()
if state not in STATE_CODES.values() or state not in last_data:
# Entries having unrecognized state codes/zero cases are discarded
if state not in STATE_CODES.values():
logging.warning(
f"[L{j+2}] [{entry['lastupdatedtime']}] Bad state: {entry['statecode']}"
)
continue
try:
| python | MIT | 3303854897685cfeaefed458748771b6ea63bf5c | 2026-01-05T04:52:08.585516Z | true |
covid19india/api | https://github.com/covid19india/api/blob/3303854897685cfeaefed458748771b6ea63bf5c/src/build_raw_data.py | src/build_raw_data.py | import pandas as pd # pylint: disable=import-error
import re
from pathlib import Path
import logging
import sys
import os
from urllib.error import HTTPError
# Set logging level
# Emit bare messages (no timestamp/level prefix) to stdout at INFO level.
logging.basicConfig(stream=sys.stdout,
                    format="%(message)s",
                    level=logging.INFO)
def fetch_raw_data_from_api():
    '''
    Read all raw data and death and recovery files
    Pass the latest version of raw data
    '''
    # Keep requesting raw_data{n}.csv until the API 404s; cache each sheet
    raw_d = []
    sheet = 1
    while True:
        try:
            url = f"https://api.covid19india.org/csv/latest/raw_data{sheet}.csv"
            frame = pd.read_csv(url)
            frame.to_csv(f'./tmp/csv/latest/raw_data{sheet}.csv',index=False)
            raw_d.append(frame)
            logging.info(f"Fetched raw_data{sheet} ")
            sheet = sheet + 1
        except HTTPError:
            # First missing sheet marks the latest available version
            current_ver = sheet - 1
            break
    # There are exactly two death/recovery sheets
    death_rec = []
    logging.info(f"Fetching deaths_and_recoveries")
    url = f"https://api.covid19india.org/csv/latest/death_and_recovered"
    for part in (1, 2):
        frame = pd.read_csv(f"{url}{part}.csv")
        death_rec.append(frame)
        frame.to_csv(f'./tmp/csv/latest/death_and_recovered{part}.csv', index=False)
    return raw_d, death_rec, current_ver
def fetch_raw_data():
    '''
    Read all raw data and death and recovery files
    Return the latest number of raw data files
    '''
    base_dir = Path('tmp/csv/latest')
    # Load cached raw_data sheets until the first missing file
    raw_d = []
    version = 0
    while True:
        try:
            frame = pd.read_csv(base_dir / f"raw_data{version + 1}.csv")
        except FileNotFoundError:
            current_ver = version
            break
        raw_d.append(frame)
        version += 1
        logging.info(f"Fetched raw_data{version} ")
    # Load cached death/recovery sheets the same way
    death_rec = []
    part = 1
    while True:
        try:
            frame = pd.read_csv(base_dir / f"death_and_recovered{part}.csv")
        except FileNotFoundError:
            break
        death_rec.append(frame)
        logging.info(f"Fetched death_and_recovered{part} ")
        part += 1
    logging.info(f"Data read complete")
    return raw_d, death_rec, current_ver
def fix_rawdata1and2(raw,rec,col_list,sheet_version):
    '''
    Raw Data 1 and 2 had different format
    Select necessary columns and change data types
    Add death and recovery data to raw_data

    raw: raw_data sheet (v1/v2) DataFrame; every kept row is treated as a
    new confirmed case (Current Status forced to "Hospitalized").
    rec: matching death_and_recovered sheet, renamed into the same schema.
    Returns the concatenation of both, restricted to col_list plus a
    Sheet_Version column.
    '''
    print(f"V{sheet_version} Shape \t: {raw.shape}")
    # Only choose the valid current statuses
    raw = raw[raw['Current Status'].isin( ['Hospitalized','Recovered','Deceased','Migrated','Migrated_Other'])].copy()
    # Prepare necessary columns
    raw['Num Cases'] = 1
    raw['Entry_ID'] = 0
    # v1/v2 raw rows all represent new confirmations
    raw['Current Status'] = "Hospitalized"
    raw = raw.fillna('')
    raw = raw[col_list]
    # If Detected State is not available, entry is invalid
    raw = raw[raw['Detected State'] != ''].copy()
    # Convert Date Announced string to datetime
    raw['Date Announced'] = pd.to_datetime(raw['Date Announced'],format='%d/%m/%Y')
    # Add Sheet Version Column
    raw['Sheet_Version'] = sheet_version
    # Only choose the valid current statuses
    rec = rec[rec['Patient_Status'].isin(['Hospitalized','Recovered','Deceased','Migrated','Migrated_Other'])].copy()
    # Prepare necessary columns: map the death/recovery sheet's column
    # names onto the raw-data schema
    rec['Num Cases'] = 1
    rec['Entry_ID'] = 0
    rec['Current Status'] = rec['Patient_Status']
    rec['Date Announced'] = rec['Date']
    rec['State code'] = rec['Statecode']
    rec['Detected City'] = rec['City']
    rec['Status Change Date'] = ''
    rec['Contracted from which Patient (Suspected)'] = ''
    rec['Detected State'] = rec['State']
    rec['Detected District'] = rec['District']
    rec['Patient Number'] = rec['Patient_Number (Could be mapped later)']
    rec['State Patient Number'] = ''
    rec['Type of transmission'] = ''
    rec = rec.fillna('')
    rec = rec[col_list]
    # If Detected State is not available, entry is invalid
    rec = rec[rec['Detected State'] != ''].copy()
    # Convert Date column from string to date
    rec['Date Announced'] = pd.to_datetime(rec['Date Announced'],format='%d/%m/%Y')
    # Add sheet version
    rec['Sheet_Version'] = sheet_version
    # Add deaths and recoveries to raw data
    raw = pd.concat([raw,rec],sort=True)
    return raw
def merge_alldata(current_ver):
    '''
    Merge it all together

    Sheets 1 and 2 (and their death/recovery companions, taken from the
    module globals `raw_d` and `death_rec`) go through fix_rawdata1and2;
    sheets 3..current_ver are already in the unified format and only need
    filtering, column selection and typing.  Returns one DataFrame with a
    Sheet_Version column marking each row's originating sheet.
    '''
    # Canonical column set shared by every sheet version
    col_list = ['Entry_ID', 'State Patient Number', 'Date Announced', 'Age Bracket',
                'Gender', 'Detected City', 'Detected District', 'Detected State',
                'State code', 'Num Cases', 'Current Status',
                'Contracted from which Patient (Suspected)', 'Notes', 'Source_1',
                'Source_2', 'Source_3', 'Nationality', 'Type of transmission',
                'Status Change Date', 'Patient Number']
    allraw = fix_rawdata1and2(raw_d[0],death_rec[0],col_list,sheet_version=1)
    tmp = fix_rawdata1and2(raw_d[1],death_rec[1],col_list,sheet_version=2)
    allraw = pd.concat([allraw,tmp],sort=True)
    for i in range(2,current_ver):
        tmp = raw_d[i]
        # BUG FIX: the shape used to be printed *before* `tmp` was
        # reassigned, so each "V{n}" line reported the previous sheet's
        # shape and the last sheet's shape was never printed.
        print(f"V{i+1} Shape \t: {tmp.shape}")
        tmp = tmp.fillna('')
        # Remove rows that doesn't have
        # any State mentioned.
        # This handles the situation at
        # the tail of most recent sheet
        tmp = tmp[tmp['Detected State'] != ''].copy()
        # Select only necessary columns
        tmp = tmp[col_list]
        # Convert date string to datetime
        tmp['Date Announced'] = pd.to_datetime(tmp['Date Announced'],format='%d/%m/%Y')
        # Add sheet version
        tmp['Sheet_Version'] = i+1
        allraw = pd.concat([allraw,tmp],sort=True)
    # Try to fix age to float
    allraw['Age Bracket'] = allraw['Age Bracket'].map(lambda x : fix_age(x))
    # Try to fix gender column
    allraw['Gender'] = allraw['Gender'].map(lambda x : fix_gender(x))
    print(f"Raw Data Shape \t: {allraw.shape}")
    return allraw
def fix_age(age):
    '''
    Age entries are sometimes entered in months or days.
    Convert those to a fractional number of years; anything else is
    coerced with float().
    '''
    text = str(age).lower()
    month_match = re.search(r"([0-9]*)( month?.)", text)
    if month_match:
        return round(float(month_match.group(1)) / 12, 2)
    day_match = re.search(r"([0-9]*)( day?.)", text)
    if day_match:
        return round(float(day_match.group(1)) / 365.25, 2)
    return float(age)
def fix_gender(g):
    '''
    Fix any invalid entries in gender column

    Normalises "female"/"Female" (and single "w"/"W") to "F" and
    "male"/"Male" to "M"; everything else is returned as-is (stringified).
    '''
    # BUG FIX: the classes were written as [w,W]/[f,F]/[m,M] — the stray
    # commas made a literal "," match too, so any comma in the input was
    # silently rewritten to "F" (or "M").
    rgx_F = re.compile(r"[wW]|[fF]emale")
    rgx_M = re.compile(r"[mM]ale")
    g = str(g)
    g = re.sub(rgx_F,"F",g)
    g = re.sub(rgx_M,"M",g)
    return g
def compare_with_gospel():
    '''
    Till April 26th, the districtwise sheet was managed
    separately. i.e, Raw Data till then do not truly represent
    the district values till then.
    This function compares the entries in Raw Data with gospel
    Note that this function ignores the blank districts.

    Reads the merged ./tmp/csv/latest/raw_data.csv, pivots per-district
    counts up to 2020-04-26, joins against the published gospel CSV and
    writes the per-statistic differences to
    ./tmp/csv/compare_gospel_v1v2.csv.  Returns the comparison DataFrame.
    '''
    # Read merged data
    df = pd.read_csv('./tmp/csv/latest/raw_data.csv',low_memory=False)
    # NOTE(review): .head() here discards its result — it is a no-op
    df.head()
    df['Date Announced'] = pd.to_datetime(df['Date Announced'])
    # Restrict to the pre-gospel period
    df = df[df['Date Announced'] <= '2020-04-26']
    df['District_Key'] = df['State code'] + "_" + df['Detected District']
    # Non-numeric counts become NaN and drop out of the pivot sums
    df['Num Cases'] = pd.to_numeric(df['Num Cases'], errors='coerce')
    dis_counts = pd.pivot_table(df,values = 'Num Cases',
                                index = 'District_Key',
                                columns='Current Status',
                                aggfunc = sum).reset_index()
    dis_counts.rename(columns={'Hospitalized':'Confirmed'},inplace=True)
    # Read gospel
    url = "https://raw.githubusercontent.com/covid19india/api/gh-pages/csv/latest/districts_26apr_gospel.csv"
    gospel = pd.read_csv(url)
    # Inner join on District_Key; per-source columns get _gospel/_v1v2 suffixes
    compare = pd.merge(gospel,dis_counts,on='District_Key', suffixes=("_gospel","_v1v2"))
    compare.fillna(0,inplace=True)
    compare['Conf_Diff'] = compare['Confirmed_gospel'] - compare['Confirmed_v1v2']
    compare['Reco_Diff'] = compare['Recovered_gospel'] - compare['Recovered_v1v2']
    compare['Dece_Diff'] = compare['Deceased_gospel'] - compare['Deceased_v1v2']
    compare.to_csv("./tmp/csv/compare_gospel_v1v2.csv",index=False)
    logging.info('Comparison file saved as ./tmp/csv/compare_gospel_v1v2.csv')
    return compare
if __name__ == "__main__":
    logging.info('''----------------------------------------------------------------------''')
    logging.info('''Build one true raw data''')
    logging.info('''----------------------------------------------------------------------''')
    # Ensure the cache directory for downloaded/merged CSVs exists
    os.makedirs('./tmp/csv/latest/',exist_ok=True)
    try:
        # raw_d,death_rec,current_ver = fetch_raw_data()
        # If remote fetch is required
        raw_d,death_rec,current_ver = fetch_raw_data_from_api()
    except Exception as e:
        # BUG FIX: the exception was caught and bound but never included in
        # the log, leaving no trace of the actual failure before re-raising.
        logging.error(f"Error while reading the files: {e}")
        raise
    logging.info('''----------------------------------------------------------------------''')
    allraw = merge_alldata(current_ver)
    allraw.to_csv('./tmp/csv/latest/raw_data.csv',index=False)
    logging.info('''----------------------------------------------------------------------''')
    logging.info('''Raw Data saved''')
    logging.info('''----------------------------------------------------------------------''')
    logging.info('''Comparing with Gospel''')
    # Result is persisted to disk by compare_with_gospel; frame unused here
    _ = compare_with_gospel()
    logging.info('''----------------------------------------------------------------------''')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.