repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
adamcharnock/swiftwind | swiftwind/billing_cycle/models.py | BillingCycle.populate | python | def populate(cls, as_of=None):
return cls._populate(as_of=as_of or date.today(), delete=True) | Ensure the next X years of billing cycles exist | train | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/billing_cycle/models.py#L78-L81 | [
"def _populate(cls, as_of=None, delete=False):\n \"\"\"Populate the table with billing cycles starting from `as_of`\n\n Args:\n as_of (date): The date at which to begin the populating\n delete (bool): Should future billing cycles be deleted?\n\n\n \"\"\"\n billing_cycle_helper = get_billin... | class BillingCycle(models.Model):
# TODO: Currently does not support changing of billing-cycle type (i.e. monthly/weekly)
# once data has been created
uuid = SmallUUIDField(default=uuid_default(), editable=False)
date_range = DateRangeField(
db_index=True,
help_text='The start and end date of this billing cycle. '
'May not overlay with any other billing cycles.'
)
transactions_created = models.BooleanField(
default=False,
help_text='Have transactions been created for this billing cycle?'
)
statements_sent = models.BooleanField(
default=False,
help_text='Have we sent housemates their statements for this billing cycle?'
)
objects = BillingCycleManager()
class Meta:
ordering = ['date_range']
def __str__(self):
return 'Cycle starting {}'.format(formats.localize(self.date_range.lower, use_l10n=True))
def __repr__(self):
return 'BillingCycle <{}>'.format(self.date_range)
@classmethod
@classmethod
def repopulate(cls):
"""Create the next X years of billing cycles
Will delete any billing cycles which are in the future
"""
return cls._populate(as_of=date.today(), delete=False)
@classmethod
def _populate(cls, as_of=None, delete=False):
"""Populate the table with billing cycles starting from `as_of`
Args:
as_of (date): The date at which to begin the populating
delete (bool): Should future billing cycles be deleted?
"""
billing_cycle_helper = get_billing_cycle()
billing_cycles_exist = BillingCycle.objects.exists()
try:
current_billing_cycle = BillingCycle.objects.as_of(date=as_of)
except BillingCycle.DoesNotExist:
current_billing_cycle = None
# If no cycles exist then disable the deletion logic
if not billing_cycles_exist:
delete = False
# Cycles exist, but a date has been specified outside of them
if billing_cycles_exist and not current_billing_cycle:
raise CannotPopulateForDateOutsideExistingCycles()
# Omit the current billing cycle if we are deleting (as
# deleting the current billing cycle will be a Bad Idea)
omit_current = (current_billing_cycle and delete)
stop_date = as_of + relativedelta(years=settings.SWIFTWIND_BILLING_CYCLE_YEARS)
date_ranges = billing_cycle_helper.generate_date_ranges(as_of, stop_date=stop_date, omit_current=omit_current)
date_ranges = list(date_ranges)
beginning_date = date_ranges[0][0]
with db_transaction.atomic():
if delete:
# Delete all the future unused transactions
cls.objects.filter(start_date__gte=beginning_date).delete()
for start_date, end_date in date_ranges:
exists = BillingCycle.objects.filter(date_range=(start_date, end_date)).exists()
if exists:
if delete:
raise Exception(
'It should not be possible to get here as future billing cycles have just been deleted'
)
else:
# We're updating, so we can just ignore cycles that already exist
pass
else:
BillingCycle.objects.create(
date_range=(start_date, end_date),
)
def get_next(self):
"""Get the billing cycle after this one. May return None"""
return BillingCycle.objects.filter(date_range__gt=self.date_range).order_by('date_range').first()
def get_previous(self):
"""Get the billing cycle prior to this one. May return None"""
return BillingCycle.objects.filter(date_range__lt=self.date_range).order_by('date_range').last()
def is_reconciled(self):
"""Have transactions been imported and reconciled for this billing cycle?"""
from hordak.models import StatementImport, StatementLine
since = datetime(
self.date_range.lower.year,
self.date_range.lower.month,
self.date_range.lower.day,
tzinfo=UTC
)
if not StatementImport.objects.filter(timestamp__gte=since).exists():
# No import done since the end of the above billing cycle, and reconciliation
# requires an import. Therefore reconciliation can not have been done
return False
if StatementLine.objects.filter(
transaction__isnull=True,
date__gte=self.date_range.lower,
date__lt=self.date_range.upper
).exists():
# There are statement lines for this period which have not been reconciled
return False
return True
def notify_housemates(self):
"""Notify housemates in one of two ways:
1. Reconciliation is required before statements can be sent
2. Send a statement
"""
if self.is_reconciled():
self.send_statements()
else:
self.send_reconciliation_required()
def send_reconciliation_required(self):
from swiftwind.accounts.views import ReconciliationRequiredEmailView
for housemate in Housemate.objects.filter(user__is_active=True):
html = ReconciliationRequiredEmailView.get_html()
send_mail(
subject='Reconciliation required'.format(),
message='See {}{}'.format(
get_site_root(),
reverse('accounts:housemate_reconciliation_required_email')
),
from_email=Settings.objects.get().email_from_address,
recipient_list=[housemate.user.email],
html_message=html,
)
def can_create_transactions(self):
"""Can we create the transactions
We can only do this if the previous cycle has been reconciled,
as some costs may depend upon it to calculate their amounts.
"""
previous = self.get_previous()
return not previous or previous.is_reconciled()
def can_send_statements(self):
return self.can_create_transactions() and self.transactions_created
@transaction.atomic()
def send_statements(self, force=False):
from swiftwind.accounts.views import StatementEmailView
should_send = force or (not self.statements_sent and self.transactions_created)
if not should_send:
return False
for housemate in Housemate.objects.filter(user__is_active=True):
html = StatementEmailView.get_html(
uuid=housemate.uuid,
date=str(self.date_range.lower)
)
send_mail(
subject='{}, your house statement for {}'.format(
housemate.user.first_name or housemate.user.username,
# TODO: Assumes monthly billing cycles
self.date_range.lower.strftime('%B %Y'),
),
message='See {}{}'.format(
get_site_root(),
reverse('accounts:housemate_statement_email',
args=[housemate.uuid, str(self.date_range.lower)]
)
),
from_email=Settings.objects.get().email_from_address,
recipient_list=[housemate.user.email],
html_message=html,
)
def enact_all_costs(self):
from swiftwind.costs.models import RecurringCost
with transaction.atomic():
for recurring_cost in RecurringCost.objects.all():
try:
recurring_cost.enact(self)
except (CannotEnactUnenactableRecurringCostError, RecurringCostAlreadyEnactedForBillingCycle):
pass
self.transactions_created = True
self.save()
for recurring_cost in RecurringCost.objects.all():
recurring_cost.disable_if_done()
def unenact_all_costs(self):
from swiftwind.costs.models import RecurringCost, RecurredCost
with transaction.atomic():
transaction_ids = list(Transaction.objects.filter(recurred_cost__billing_cycle=self).values_list('pk', flat=True))
RecurredCost.objects.filter(billing_cycle=self).delete()
Transaction.objects.filter(pk__in=transaction_ids).delete()
self.transactions_created = False
for recurring_cost in RecurringCost.objects.all():
recurring_cost.disabled = False
recurring_cost.save()
recurring_cost.disable_if_done()
self.save()
def reenact_all_costs(self):
from swiftwind.costs.models import RecurringCost, RecurredCost
with transaction.atomic():
# We need to delete the recurred cost before the transactions
# otherwise django will complain that the RecurredCost.transaction
# field cannot be set to null
transaction_ids = list(Transaction.objects.filter(recurred_cost__billing_cycle=self).values_list('pk', flat=True))
RecurredCost.objects.filter(billing_cycle=self).delete()
Transaction.objects.filter(pk__in=transaction_ids).delete()
self.transactions_created = False
self.save()
for recurring_cost in RecurringCost.objects.all():
recurring_cost.disabled = False
if not recurring_cost.is_enactable(self.start_date):
continue
recurring_cost.save()
recurring_cost.enact(self, disable_if_done=False)
self.transactions_created = True
self.save()
for recurring_cost in RecurringCost.objects.all():
recurring_cost.disable_if_done()
|
adamcharnock/swiftwind | swiftwind/billing_cycle/models.py | BillingCycle._populate | python | def _populate(cls, as_of=None, delete=False):
billing_cycle_helper = get_billing_cycle()
billing_cycles_exist = BillingCycle.objects.exists()
try:
current_billing_cycle = BillingCycle.objects.as_of(date=as_of)
except BillingCycle.DoesNotExist:
current_billing_cycle = None
# If no cycles exist then disable the deletion logic
if not billing_cycles_exist:
delete = False
# Cycles exist, but a date has been specified outside of them
if billing_cycles_exist and not current_billing_cycle:
raise CannotPopulateForDateOutsideExistingCycles()
# Omit the current billing cycle if we are deleting (as
# deleting the current billing cycle will be a Bad Idea)
omit_current = (current_billing_cycle and delete)
stop_date = as_of + relativedelta(years=settings.SWIFTWIND_BILLING_CYCLE_YEARS)
date_ranges = billing_cycle_helper.generate_date_ranges(as_of, stop_date=stop_date, omit_current=omit_current)
date_ranges = list(date_ranges)
beginning_date = date_ranges[0][0]
with db_transaction.atomic():
if delete:
# Delete all the future unused transactions
cls.objects.filter(start_date__gte=beginning_date).delete()
for start_date, end_date in date_ranges:
exists = BillingCycle.objects.filter(date_range=(start_date, end_date)).exists()
if exists:
if delete:
raise Exception(
'It should not be possible to get here as future billing cycles have just been deleted'
)
else:
# We're updating, so we can just ignore cycles that already exist
pass
else:
BillingCycle.objects.create(
date_range=(start_date, end_date),
) | Populate the table with billing cycles starting from `as_of`
Args:
as_of (date): The date at which to begin the populating
delete (bool): Should future billing cycles be deleted? | train | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/billing_cycle/models.py#L92-L146 | [
"def get_billing_cycle():\n \"\"\"\n\n Returns:\n BaseCycle:\n \"\"\"\n return import_string(settings.SWIFTWIND_BILLING_CYCLE)()\n"
] | class BillingCycle(models.Model):
# TODO: Currently does not support changing of billing-cycle type (i.e. monthly/weekly)
# once data has been created
uuid = SmallUUIDField(default=uuid_default(), editable=False)
date_range = DateRangeField(
db_index=True,
help_text='The start and end date of this billing cycle. '
'May not overlay with any other billing cycles.'
)
transactions_created = models.BooleanField(
default=False,
help_text='Have transactions been created for this billing cycle?'
)
statements_sent = models.BooleanField(
default=False,
help_text='Have we sent housemates their statements for this billing cycle?'
)
objects = BillingCycleManager()
class Meta:
ordering = ['date_range']
def __str__(self):
return 'Cycle starting {}'.format(formats.localize(self.date_range.lower, use_l10n=True))
def __repr__(self):
return 'BillingCycle <{}>'.format(self.date_range)
@classmethod
def populate(cls, as_of=None):
"""Ensure the next X years of billing cycles exist
"""
return cls._populate(as_of=as_of or date.today(), delete=True)
@classmethod
def repopulate(cls):
"""Create the next X years of billing cycles
Will delete any billing cycles which are in the future
"""
return cls._populate(as_of=date.today(), delete=False)
@classmethod
def get_next(self):
"""Get the billing cycle after this one. May return None"""
return BillingCycle.objects.filter(date_range__gt=self.date_range).order_by('date_range').first()
def get_previous(self):
"""Get the billing cycle prior to this one. May return None"""
return BillingCycle.objects.filter(date_range__lt=self.date_range).order_by('date_range').last()
def is_reconciled(self):
"""Have transactions been imported and reconciled for this billing cycle?"""
from hordak.models import StatementImport, StatementLine
since = datetime(
self.date_range.lower.year,
self.date_range.lower.month,
self.date_range.lower.day,
tzinfo=UTC
)
if not StatementImport.objects.filter(timestamp__gte=since).exists():
# No import done since the end of the above billing cycle, and reconciliation
# requires an import. Therefore reconciliation can not have been done
return False
if StatementLine.objects.filter(
transaction__isnull=True,
date__gte=self.date_range.lower,
date__lt=self.date_range.upper
).exists():
# There are statement lines for this period which have not been reconciled
return False
return True
def notify_housemates(self):
"""Notify housemates in one of two ways:
1. Reconciliation is required before statements can be sent
2. Send a statement
"""
if self.is_reconciled():
self.send_statements()
else:
self.send_reconciliation_required()
def send_reconciliation_required(self):
from swiftwind.accounts.views import ReconciliationRequiredEmailView
for housemate in Housemate.objects.filter(user__is_active=True):
html = ReconciliationRequiredEmailView.get_html()
send_mail(
subject='Reconciliation required'.format(),
message='See {}{}'.format(
get_site_root(),
reverse('accounts:housemate_reconciliation_required_email')
),
from_email=Settings.objects.get().email_from_address,
recipient_list=[housemate.user.email],
html_message=html,
)
def can_create_transactions(self):
"""Can we create the transactions
We can only do this if the previous cycle has been reconciled,
as some costs may depend upon it to calculate their amounts.
"""
previous = self.get_previous()
return not previous or previous.is_reconciled()
def can_send_statements(self):
return self.can_create_transactions() and self.transactions_created
@transaction.atomic()
def send_statements(self, force=False):
from swiftwind.accounts.views import StatementEmailView
should_send = force or (not self.statements_sent and self.transactions_created)
if not should_send:
return False
for housemate in Housemate.objects.filter(user__is_active=True):
html = StatementEmailView.get_html(
uuid=housemate.uuid,
date=str(self.date_range.lower)
)
send_mail(
subject='{}, your house statement for {}'.format(
housemate.user.first_name or housemate.user.username,
# TODO: Assumes monthly billing cycles
self.date_range.lower.strftime('%B %Y'),
),
message='See {}{}'.format(
get_site_root(),
reverse('accounts:housemate_statement_email',
args=[housemate.uuid, str(self.date_range.lower)]
)
),
from_email=Settings.objects.get().email_from_address,
recipient_list=[housemate.user.email],
html_message=html,
)
def enact_all_costs(self):
from swiftwind.costs.models import RecurringCost
with transaction.atomic():
for recurring_cost in RecurringCost.objects.all():
try:
recurring_cost.enact(self)
except (CannotEnactUnenactableRecurringCostError, RecurringCostAlreadyEnactedForBillingCycle):
pass
self.transactions_created = True
self.save()
for recurring_cost in RecurringCost.objects.all():
recurring_cost.disable_if_done()
def unenact_all_costs(self):
from swiftwind.costs.models import RecurringCost, RecurredCost
with transaction.atomic():
transaction_ids = list(Transaction.objects.filter(recurred_cost__billing_cycle=self).values_list('pk', flat=True))
RecurredCost.objects.filter(billing_cycle=self).delete()
Transaction.objects.filter(pk__in=transaction_ids).delete()
self.transactions_created = False
for recurring_cost in RecurringCost.objects.all():
recurring_cost.disabled = False
recurring_cost.save()
recurring_cost.disable_if_done()
self.save()
def reenact_all_costs(self):
from swiftwind.costs.models import RecurringCost, RecurredCost
with transaction.atomic():
# We need to delete the recurred cost before the transactions
# otherwise django will complain that the RecurredCost.transaction
# field cannot be set to null
transaction_ids = list(Transaction.objects.filter(recurred_cost__billing_cycle=self).values_list('pk', flat=True))
RecurredCost.objects.filter(billing_cycle=self).delete()
Transaction.objects.filter(pk__in=transaction_ids).delete()
self.transactions_created = False
self.save()
for recurring_cost in RecurringCost.objects.all():
recurring_cost.disabled = False
if not recurring_cost.is_enactable(self.start_date):
continue
recurring_cost.save()
recurring_cost.enact(self, disable_if_done=False)
self.transactions_created = True
self.save()
for recurring_cost in RecurringCost.objects.all():
recurring_cost.disable_if_done()
|
adamcharnock/swiftwind | swiftwind/billing_cycle/models.py | BillingCycle.get_next | python | def get_next(self):
return BillingCycle.objects.filter(date_range__gt=self.date_range).order_by('date_range').first() | Get the billing cycle after this one. May return None | train | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/billing_cycle/models.py#L148-L150 | null | class BillingCycle(models.Model):
# TODO: Currently does not support changing of billing-cycle type (i.e. monthly/weekly)
# once data has been created
uuid = SmallUUIDField(default=uuid_default(), editable=False)
date_range = DateRangeField(
db_index=True,
help_text='The start and end date of this billing cycle. '
'May not overlay with any other billing cycles.'
)
transactions_created = models.BooleanField(
default=False,
help_text='Have transactions been created for this billing cycle?'
)
statements_sent = models.BooleanField(
default=False,
help_text='Have we sent housemates their statements for this billing cycle?'
)
objects = BillingCycleManager()
class Meta:
ordering = ['date_range']
def __str__(self):
return 'Cycle starting {}'.format(formats.localize(self.date_range.lower, use_l10n=True))
def __repr__(self):
return 'BillingCycle <{}>'.format(self.date_range)
@classmethod
def populate(cls, as_of=None):
"""Ensure the next X years of billing cycles exist
"""
return cls._populate(as_of=as_of or date.today(), delete=True)
@classmethod
def repopulate(cls):
"""Create the next X years of billing cycles
Will delete any billing cycles which are in the future
"""
return cls._populate(as_of=date.today(), delete=False)
@classmethod
def _populate(cls, as_of=None, delete=False):
"""Populate the table with billing cycles starting from `as_of`
Args:
as_of (date): The date at which to begin the populating
delete (bool): Should future billing cycles be deleted?
"""
billing_cycle_helper = get_billing_cycle()
billing_cycles_exist = BillingCycle.objects.exists()
try:
current_billing_cycle = BillingCycle.objects.as_of(date=as_of)
except BillingCycle.DoesNotExist:
current_billing_cycle = None
# If no cycles exist then disable the deletion logic
if not billing_cycles_exist:
delete = False
# Cycles exist, but a date has been specified outside of them
if billing_cycles_exist and not current_billing_cycle:
raise CannotPopulateForDateOutsideExistingCycles()
# Omit the current billing cycle if we are deleting (as
# deleting the current billing cycle will be a Bad Idea)
omit_current = (current_billing_cycle and delete)
stop_date = as_of + relativedelta(years=settings.SWIFTWIND_BILLING_CYCLE_YEARS)
date_ranges = billing_cycle_helper.generate_date_ranges(as_of, stop_date=stop_date, omit_current=omit_current)
date_ranges = list(date_ranges)
beginning_date = date_ranges[0][0]
with db_transaction.atomic():
if delete:
# Delete all the future unused transactions
cls.objects.filter(start_date__gte=beginning_date).delete()
for start_date, end_date in date_ranges:
exists = BillingCycle.objects.filter(date_range=(start_date, end_date)).exists()
if exists:
if delete:
raise Exception(
'It should not be possible to get here as future billing cycles have just been deleted'
)
else:
# We're updating, so we can just ignore cycles that already exist
pass
else:
BillingCycle.objects.create(
date_range=(start_date, end_date),
)
def get_previous(self):
"""Get the billing cycle prior to this one. May return None"""
return BillingCycle.objects.filter(date_range__lt=self.date_range).order_by('date_range').last()
def is_reconciled(self):
"""Have transactions been imported and reconciled for this billing cycle?"""
from hordak.models import StatementImport, StatementLine
since = datetime(
self.date_range.lower.year,
self.date_range.lower.month,
self.date_range.lower.day,
tzinfo=UTC
)
if not StatementImport.objects.filter(timestamp__gte=since).exists():
# No import done since the end of the above billing cycle, and reconciliation
# requires an import. Therefore reconciliation can not have been done
return False
if StatementLine.objects.filter(
transaction__isnull=True,
date__gte=self.date_range.lower,
date__lt=self.date_range.upper
).exists():
# There are statement lines for this period which have not been reconciled
return False
return True
def notify_housemates(self):
"""Notify housemates in one of two ways:
1. Reconciliation is required before statements can be sent
2. Send a statement
"""
if self.is_reconciled():
self.send_statements()
else:
self.send_reconciliation_required()
def send_reconciliation_required(self):
from swiftwind.accounts.views import ReconciliationRequiredEmailView
for housemate in Housemate.objects.filter(user__is_active=True):
html = ReconciliationRequiredEmailView.get_html()
send_mail(
subject='Reconciliation required'.format(),
message='See {}{}'.format(
get_site_root(),
reverse('accounts:housemate_reconciliation_required_email')
),
from_email=Settings.objects.get().email_from_address,
recipient_list=[housemate.user.email],
html_message=html,
)
def can_create_transactions(self):
"""Can we create the transactions
We can only do this if the previous cycle has been reconciled,
as some costs may depend upon it to calculate their amounts.
"""
previous = self.get_previous()
return not previous or previous.is_reconciled()
def can_send_statements(self):
return self.can_create_transactions() and self.transactions_created
@transaction.atomic()
def send_statements(self, force=False):
from swiftwind.accounts.views import StatementEmailView
should_send = force or (not self.statements_sent and self.transactions_created)
if not should_send:
return False
for housemate in Housemate.objects.filter(user__is_active=True):
html = StatementEmailView.get_html(
uuid=housemate.uuid,
date=str(self.date_range.lower)
)
send_mail(
subject='{}, your house statement for {}'.format(
housemate.user.first_name or housemate.user.username,
# TODO: Assumes monthly billing cycles
self.date_range.lower.strftime('%B %Y'),
),
message='See {}{}'.format(
get_site_root(),
reverse('accounts:housemate_statement_email',
args=[housemate.uuid, str(self.date_range.lower)]
)
),
from_email=Settings.objects.get().email_from_address,
recipient_list=[housemate.user.email],
html_message=html,
)
def enact_all_costs(self):
from swiftwind.costs.models import RecurringCost
with transaction.atomic():
for recurring_cost in RecurringCost.objects.all():
try:
recurring_cost.enact(self)
except (CannotEnactUnenactableRecurringCostError, RecurringCostAlreadyEnactedForBillingCycle):
pass
self.transactions_created = True
self.save()
for recurring_cost in RecurringCost.objects.all():
recurring_cost.disable_if_done()
def unenact_all_costs(self):
from swiftwind.costs.models import RecurringCost, RecurredCost
with transaction.atomic():
transaction_ids = list(Transaction.objects.filter(recurred_cost__billing_cycle=self).values_list('pk', flat=True))
RecurredCost.objects.filter(billing_cycle=self).delete()
Transaction.objects.filter(pk__in=transaction_ids).delete()
self.transactions_created = False
for recurring_cost in RecurringCost.objects.all():
recurring_cost.disabled = False
recurring_cost.save()
recurring_cost.disable_if_done()
self.save()
def reenact_all_costs(self):
from swiftwind.costs.models import RecurringCost, RecurredCost
with transaction.atomic():
# We need to delete the recurred cost before the transactions
# otherwise django will complain that the RecurredCost.transaction
# field cannot be set to null
transaction_ids = list(Transaction.objects.filter(recurred_cost__billing_cycle=self).values_list('pk', flat=True))
RecurredCost.objects.filter(billing_cycle=self).delete()
Transaction.objects.filter(pk__in=transaction_ids).delete()
self.transactions_created = False
self.save()
for recurring_cost in RecurringCost.objects.all():
recurring_cost.disabled = False
if not recurring_cost.is_enactable(self.start_date):
continue
recurring_cost.save()
recurring_cost.enact(self, disable_if_done=False)
self.transactions_created = True
self.save()
for recurring_cost in RecurringCost.objects.all():
recurring_cost.disable_if_done()
|
adamcharnock/swiftwind | swiftwind/billing_cycle/models.py | BillingCycle.get_previous | python | def get_previous(self):
return BillingCycle.objects.filter(date_range__lt=self.date_range).order_by('date_range').last() | Get the billing cycle prior to this one. May return None | train | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/billing_cycle/models.py#L152-L154 | null | class BillingCycle(models.Model):
# TODO: Currently does not support changing of billing-cycle type (i.e. monthly/weekly)
# once data has been created
uuid = SmallUUIDField(default=uuid_default(), editable=False)
date_range = DateRangeField(
db_index=True,
help_text='The start and end date of this billing cycle. '
'May not overlay with any other billing cycles.'
)
transactions_created = models.BooleanField(
default=False,
help_text='Have transactions been created for this billing cycle?'
)
statements_sent = models.BooleanField(
default=False,
help_text='Have we sent housemates their statements for this billing cycle?'
)
objects = BillingCycleManager()
class Meta:
ordering = ['date_range']
def __str__(self):
return 'Cycle starting {}'.format(formats.localize(self.date_range.lower, use_l10n=True))
def __repr__(self):
return 'BillingCycle <{}>'.format(self.date_range)
@classmethod
def populate(cls, as_of=None):
"""Ensure the next X years of billing cycles exist
"""
return cls._populate(as_of=as_of or date.today(), delete=True)
@classmethod
def repopulate(cls):
"""Create the next X years of billing cycles
Will delete any billing cycles which are in the future
"""
return cls._populate(as_of=date.today(), delete=False)
@classmethod
def _populate(cls, as_of=None, delete=False):
"""Populate the table with billing cycles starting from `as_of`
Args:
as_of (date): The date at which to begin the populating
delete (bool): Should future billing cycles be deleted?
"""
billing_cycle_helper = get_billing_cycle()
billing_cycles_exist = BillingCycle.objects.exists()
try:
current_billing_cycle = BillingCycle.objects.as_of(date=as_of)
except BillingCycle.DoesNotExist:
current_billing_cycle = None
# If no cycles exist then disable the deletion logic
if not billing_cycles_exist:
delete = False
# Cycles exist, but a date has been specified outside of them
if billing_cycles_exist and not current_billing_cycle:
raise CannotPopulateForDateOutsideExistingCycles()
# Omit the current billing cycle if we are deleting (as
# deleting the current billing cycle will be a Bad Idea)
omit_current = (current_billing_cycle and delete)
stop_date = as_of + relativedelta(years=settings.SWIFTWIND_BILLING_CYCLE_YEARS)
date_ranges = billing_cycle_helper.generate_date_ranges(as_of, stop_date=stop_date, omit_current=omit_current)
date_ranges = list(date_ranges)
beginning_date = date_ranges[0][0]
with db_transaction.atomic():
if delete:
# Delete all the future unused transactions
cls.objects.filter(start_date__gte=beginning_date).delete()
for start_date, end_date in date_ranges:
exists = BillingCycle.objects.filter(date_range=(start_date, end_date)).exists()
if exists:
if delete:
raise Exception(
'It should not be possible to get here as future billing cycles have just been deleted'
)
else:
# We're updating, so we can just ignore cycles that already exist
pass
else:
BillingCycle.objects.create(
date_range=(start_date, end_date),
)
def get_next(self):
"""Get the billing cycle after this one. May return None"""
return BillingCycle.objects.filter(date_range__gt=self.date_range).order_by('date_range').first()
def is_reconciled(self):
"""Have transactions been imported and reconciled for this billing cycle?"""
from hordak.models import StatementImport, StatementLine
since = datetime(
self.date_range.lower.year,
self.date_range.lower.month,
self.date_range.lower.day,
tzinfo=UTC
)
if not StatementImport.objects.filter(timestamp__gte=since).exists():
# No import done since the end of the above billing cycle, and reconciliation
# requires an import. Therefore reconciliation can not have been done
return False
if StatementLine.objects.filter(
transaction__isnull=True,
date__gte=self.date_range.lower,
date__lt=self.date_range.upper
).exists():
# There are statement lines for this period which have not been reconciled
return False
return True
def notify_housemates(self):
"""Notify housemates in one of two ways:
1. Reconciliation is required before statements can be sent
2. Send a statement
"""
if self.is_reconciled():
self.send_statements()
else:
self.send_reconciliation_required()
def send_reconciliation_required(self):
from swiftwind.accounts.views import ReconciliationRequiredEmailView
for housemate in Housemate.objects.filter(user__is_active=True):
html = ReconciliationRequiredEmailView.get_html()
send_mail(
subject='Reconciliation required'.format(),
message='See {}{}'.format(
get_site_root(),
reverse('accounts:housemate_reconciliation_required_email')
),
from_email=Settings.objects.get().email_from_address,
recipient_list=[housemate.user.email],
html_message=html,
)
def can_create_transactions(self):
"""Can we create the transactions
We can only do this if the previous cycle has been reconciled,
as some costs may depend upon it to calculate their amounts.
"""
previous = self.get_previous()
return not previous or previous.is_reconciled()
def can_send_statements(self):
return self.can_create_transactions() and self.transactions_created
@transaction.atomic()
def send_statements(self, force=False):
from swiftwind.accounts.views import StatementEmailView
should_send = force or (not self.statements_sent and self.transactions_created)
if not should_send:
return False
for housemate in Housemate.objects.filter(user__is_active=True):
html = StatementEmailView.get_html(
uuid=housemate.uuid,
date=str(self.date_range.lower)
)
send_mail(
subject='{}, your house statement for {}'.format(
housemate.user.first_name or housemate.user.username,
# TODO: Assumes monthly billing cycles
self.date_range.lower.strftime('%B %Y'),
),
message='See {}{}'.format(
get_site_root(),
reverse('accounts:housemate_statement_email',
args=[housemate.uuid, str(self.date_range.lower)]
)
),
from_email=Settings.objects.get().email_from_address,
recipient_list=[housemate.user.email],
html_message=html,
)
def enact_all_costs(self):
from swiftwind.costs.models import RecurringCost
with transaction.atomic():
for recurring_cost in RecurringCost.objects.all():
try:
recurring_cost.enact(self)
except (CannotEnactUnenactableRecurringCostError, RecurringCostAlreadyEnactedForBillingCycle):
pass
self.transactions_created = True
self.save()
for recurring_cost in RecurringCost.objects.all():
recurring_cost.disable_if_done()
def unenact_all_costs(self):
from swiftwind.costs.models import RecurringCost, RecurredCost
with transaction.atomic():
transaction_ids = list(Transaction.objects.filter(recurred_cost__billing_cycle=self).values_list('pk', flat=True))
RecurredCost.objects.filter(billing_cycle=self).delete()
Transaction.objects.filter(pk__in=transaction_ids).delete()
self.transactions_created = False
for recurring_cost in RecurringCost.objects.all():
recurring_cost.disabled = False
recurring_cost.save()
recurring_cost.disable_if_done()
self.save()
def reenact_all_costs(self):
from swiftwind.costs.models import RecurringCost, RecurredCost
with transaction.atomic():
# We need to delete the recurred cost before the transactions
# otherwise django will complain that the RecurredCost.transaction
# field cannot be set to null
transaction_ids = list(Transaction.objects.filter(recurred_cost__billing_cycle=self).values_list('pk', flat=True))
RecurredCost.objects.filter(billing_cycle=self).delete()
Transaction.objects.filter(pk__in=transaction_ids).delete()
self.transactions_created = False
self.save()
for recurring_cost in RecurringCost.objects.all():
recurring_cost.disabled = False
if not recurring_cost.is_enactable(self.start_date):
continue
recurring_cost.save()
recurring_cost.enact(self, disable_if_done=False)
self.transactions_created = True
self.save()
for recurring_cost in RecurringCost.objects.all():
recurring_cost.disable_if_done()
|
adamcharnock/swiftwind | swiftwind/billing_cycle/models.py | BillingCycle.is_reconciled | python | def is_reconciled(self):
from hordak.models import StatementImport, StatementLine
since = datetime(
self.date_range.lower.year,
self.date_range.lower.month,
self.date_range.lower.day,
tzinfo=UTC
)
if not StatementImport.objects.filter(timestamp__gte=since).exists():
# No import done since the end of the above billing cycle, and reconciliation
# requires an import. Therefore reconciliation can not have been done
return False
if StatementLine.objects.filter(
transaction__isnull=True,
date__gte=self.date_range.lower,
date__lt=self.date_range.upper
).exists():
# There are statement lines for this period which have not been reconciled
return False
return True | Have transactions been imported and reconciled for this billing cycle? | train | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/billing_cycle/models.py#L156-L178 | null | class BillingCycle(models.Model):
# TODO: Currently does not support changing of billing-cycle type (i.e. monthly/weekly)
# once data has been created
uuid = SmallUUIDField(default=uuid_default(), editable=False)
date_range = DateRangeField(
db_index=True,
help_text='The start and end date of this billing cycle. '
'May not overlay with any other billing cycles.'
)
transactions_created = models.BooleanField(
default=False,
help_text='Have transactions been created for this billing cycle?'
)
statements_sent = models.BooleanField(
default=False,
help_text='Have we sent housemates their statements for this billing cycle?'
)
objects = BillingCycleManager()
class Meta:
ordering = ['date_range']
def __str__(self):
return 'Cycle starting {}'.format(formats.localize(self.date_range.lower, use_l10n=True))
def __repr__(self):
return 'BillingCycle <{}>'.format(self.date_range)
@classmethod
def populate(cls, as_of=None):
"""Ensure the next X years of billing cycles exist
"""
return cls._populate(as_of=as_of or date.today(), delete=True)
@classmethod
def repopulate(cls):
"""Create the next X years of billing cycles
Will delete any billing cycles which are in the future
"""
return cls._populate(as_of=date.today(), delete=False)
@classmethod
def _populate(cls, as_of=None, delete=False):
"""Populate the table with billing cycles starting from `as_of`
Args:
as_of (date): The date at which to begin the populating
delete (bool): Should future billing cycles be deleted?
"""
billing_cycle_helper = get_billing_cycle()
billing_cycles_exist = BillingCycle.objects.exists()
try:
current_billing_cycle = BillingCycle.objects.as_of(date=as_of)
except BillingCycle.DoesNotExist:
current_billing_cycle = None
# If no cycles exist then disable the deletion logic
if not billing_cycles_exist:
delete = False
# Cycles exist, but a date has been specified outside of them
if billing_cycles_exist and not current_billing_cycle:
raise CannotPopulateForDateOutsideExistingCycles()
# Omit the current billing cycle if we are deleting (as
# deleting the current billing cycle will be a Bad Idea)
omit_current = (current_billing_cycle and delete)
stop_date = as_of + relativedelta(years=settings.SWIFTWIND_BILLING_CYCLE_YEARS)
date_ranges = billing_cycle_helper.generate_date_ranges(as_of, stop_date=stop_date, omit_current=omit_current)
date_ranges = list(date_ranges)
beginning_date = date_ranges[0][0]
with db_transaction.atomic():
if delete:
# Delete all the future unused transactions
cls.objects.filter(start_date__gte=beginning_date).delete()
for start_date, end_date in date_ranges:
exists = BillingCycle.objects.filter(date_range=(start_date, end_date)).exists()
if exists:
if delete:
raise Exception(
'It should not be possible to get here as future billing cycles have just been deleted'
)
else:
# We're updating, so we can just ignore cycles that already exist
pass
else:
BillingCycle.objects.create(
date_range=(start_date, end_date),
)
def get_next(self):
"""Get the billing cycle after this one. May return None"""
return BillingCycle.objects.filter(date_range__gt=self.date_range).order_by('date_range').first()
def get_previous(self):
"""Get the billing cycle prior to this one. May return None"""
return BillingCycle.objects.filter(date_range__lt=self.date_range).order_by('date_range').last()
def notify_housemates(self):
"""Notify housemates in one of two ways:
1. Reconciliation is required before statements can be sent
2. Send a statement
"""
if self.is_reconciled():
self.send_statements()
else:
self.send_reconciliation_required()
def send_reconciliation_required(self):
from swiftwind.accounts.views import ReconciliationRequiredEmailView
for housemate in Housemate.objects.filter(user__is_active=True):
html = ReconciliationRequiredEmailView.get_html()
send_mail(
subject='Reconciliation required'.format(),
message='See {}{}'.format(
get_site_root(),
reverse('accounts:housemate_reconciliation_required_email')
),
from_email=Settings.objects.get().email_from_address,
recipient_list=[housemate.user.email],
html_message=html,
)
def can_create_transactions(self):
"""Can we create the transactions
We can only do this if the previous cycle has been reconciled,
as some costs may depend upon it to calculate their amounts.
"""
previous = self.get_previous()
return not previous or previous.is_reconciled()
def can_send_statements(self):
return self.can_create_transactions() and self.transactions_created
@transaction.atomic()
def send_statements(self, force=False):
from swiftwind.accounts.views import StatementEmailView
should_send = force or (not self.statements_sent and self.transactions_created)
if not should_send:
return False
for housemate in Housemate.objects.filter(user__is_active=True):
html = StatementEmailView.get_html(
uuid=housemate.uuid,
date=str(self.date_range.lower)
)
send_mail(
subject='{}, your house statement for {}'.format(
housemate.user.first_name or housemate.user.username,
# TODO: Assumes monthly billing cycles
self.date_range.lower.strftime('%B %Y'),
),
message='See {}{}'.format(
get_site_root(),
reverse('accounts:housemate_statement_email',
args=[housemate.uuid, str(self.date_range.lower)]
)
),
from_email=Settings.objects.get().email_from_address,
recipient_list=[housemate.user.email],
html_message=html,
)
def enact_all_costs(self):
from swiftwind.costs.models import RecurringCost
with transaction.atomic():
for recurring_cost in RecurringCost.objects.all():
try:
recurring_cost.enact(self)
except (CannotEnactUnenactableRecurringCostError, RecurringCostAlreadyEnactedForBillingCycle):
pass
self.transactions_created = True
self.save()
for recurring_cost in RecurringCost.objects.all():
recurring_cost.disable_if_done()
def unenact_all_costs(self):
from swiftwind.costs.models import RecurringCost, RecurredCost
with transaction.atomic():
transaction_ids = list(Transaction.objects.filter(recurred_cost__billing_cycle=self).values_list('pk', flat=True))
RecurredCost.objects.filter(billing_cycle=self).delete()
Transaction.objects.filter(pk__in=transaction_ids).delete()
self.transactions_created = False
for recurring_cost in RecurringCost.objects.all():
recurring_cost.disabled = False
recurring_cost.save()
recurring_cost.disable_if_done()
self.save()
def reenact_all_costs(self):
from swiftwind.costs.models import RecurringCost, RecurredCost
with transaction.atomic():
# We need to delete the recurred cost before the transactions
# otherwise django will complain that the RecurredCost.transaction
# field cannot be set to null
transaction_ids = list(Transaction.objects.filter(recurred_cost__billing_cycle=self).values_list('pk', flat=True))
RecurredCost.objects.filter(billing_cycle=self).delete()
Transaction.objects.filter(pk__in=transaction_ids).delete()
self.transactions_created = False
self.save()
for recurring_cost in RecurringCost.objects.all():
recurring_cost.disabled = False
if not recurring_cost.is_enactable(self.start_date):
continue
recurring_cost.save()
recurring_cost.enact(self, disable_if_done=False)
self.transactions_created = True
self.save()
for recurring_cost in RecurringCost.objects.all():
recurring_cost.disable_if_done()
|
adamcharnock/swiftwind | swiftwind/core/templatetags/swiftwind_utilities.py | partition | python | def partition(list_, columns=2):
iter_ = iter(list_)
columns = int(columns)
rows = []
while True:
row = []
for column_number in range(1, columns + 1):
try:
value = six.next(iter_)
except StopIteration:
pass
else:
row.append(value)
if not row:
return rows
rows.append(row) | Break a list into ``columns`` number of columns. | train | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/core/templatetags/swiftwind_utilities.py#L8-L29 | null | import six
from django import template
register = template.Library()
@register.filter
@register.filter
def short_name(name):
bits = (name or '').split(' ')
if len(bits) == 0:
return name
else:
first = bits[0]
last = bits[-1]
if last:
# First + Initial
return ' '.join([first, last[0]])
else:
# No last name, just give the first name
return first
|
adamcharnock/swiftwind | swiftwind/dashboard/views.py | DashboardView.get_balance_context | python | def get_balance_context(self):
bank_account = Account.objects.get(name='Bank')
return dict(
bank=bank_account,
retained_earnings_accounts=Account.objects.filter(parent__name='Retained Earnings'),
) | Get the high level balances | train | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/dashboard/views.py#L15-L22 | null | class DashboardView(LoginRequiredMixin, TemplateView):
template_name = 'dashboard/dashboard.html'
def get_accounts_context(self):
"""Get the accounts we may want to display"""
income_parent = Account.objects.get(name='Income')
housemate_parent = Account.objects.get(name='Housemate Income')
expense_parent = Account.objects.get(name='Expenses')
current_liabilities_parent = Account.objects.get(name='Current Liabilities')
long_term_liabilities_parent = Account.objects.get(name='Long Term Liabilities')
return dict(
housemate_accounts=Account.objects.filter(parent=housemate_parent),
expense_accounts=expense_parent.get_descendants(),
current_liability_accounts=Account.objects.filter(parent=current_liabilities_parent),
long_term_liability_accounts=Account.objects.filter(parent=long_term_liabilities_parent),
other_income_accounts=Account.objects.filter(~Q(pk=housemate_parent.pk), parent=income_parent)
)
def get_context_data(self, **kwargs):
context = super(DashboardView, self).get_context_data()
context.update(**self.get_balance_context())
context.update(**self.get_accounts_context())
return context
|
adamcharnock/swiftwind | swiftwind/dashboard/views.py | DashboardView.get_accounts_context | python | def get_accounts_context(self):
income_parent = Account.objects.get(name='Income')
housemate_parent = Account.objects.get(name='Housemate Income')
expense_parent = Account.objects.get(name='Expenses')
current_liabilities_parent = Account.objects.get(name='Current Liabilities')
long_term_liabilities_parent = Account.objects.get(name='Long Term Liabilities')
return dict(
housemate_accounts=Account.objects.filter(parent=housemate_parent),
expense_accounts=expense_parent.get_descendants(),
current_liability_accounts=Account.objects.filter(parent=current_liabilities_parent),
long_term_liability_accounts=Account.objects.filter(parent=long_term_liabilities_parent),
other_income_accounts=Account.objects.filter(~Q(pk=housemate_parent.pk), parent=income_parent)
) | Get the accounts we may want to display | train | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/dashboard/views.py#L24-L38 | null | class DashboardView(LoginRequiredMixin, TemplateView):
template_name = 'dashboard/dashboard.html'
def get_balance_context(self):
"""Get the high level balances"""
bank_account = Account.objects.get(name='Bank')
return dict(
bank=bank_account,
retained_earnings_accounts=Account.objects.filter(parent__name='Retained Earnings'),
)
def get_context_data(self, **kwargs):
context = super(DashboardView, self).get_context_data()
context.update(**self.get_balance_context())
context.update(**self.get_accounts_context())
return context
|
occrp-attic/exactitude | exactitude/date.py | DateType.validate | python | def validate(self, obj, **kwargs):
obj = stringify(obj)
if obj is None:
return False
return self.DATE_RE.match(obj) is not None | Check if a thing is a valid date. | train | https://github.com/occrp-attic/exactitude/blob/9fe13aa70f1aac644dbc999e0b21683db507f02d/exactitude/date.py#L18-L23 | null | class DateType(ExactitudeType):
# JS: '^([12]\\d{3}(-[01]?[1-9](-[0123]?[1-9])?)?)?$'
DATE_RE = re.compile('^([12]\d{3}(-[01]?[0-9](-[0123]?[0-9]([T ]([012]?\d(:\d{1,2}(:\d{1,2}(\.\d{6})?(Z|[-+]\d{2}(:?\d{2})?)?)?)?)?)?)?)?)?$') # noqa
DATE_FULL = re.compile('\d{4}-\d{2}-\d{2}.*')
CUT_ZEROES = re.compile(r'((\-00.*)|(.00:00:00))$')
MAX_LENGTH = 19
def _clean_datetime(self, obj):
"""Python objects want to be text."""
if isinstance(obj, datetime):
# if it's not naive, put it on zulu time first:
if obj.tzinfo is not None:
obj = obj.astimezone(pytz.utc)
return obj.isoformat()[:self.MAX_LENGTH]
if isinstance(obj, date):
return obj.isoformat()
def _clean_text(self, text):
# limit to the date part of a presumed date string
# FIXME: this may get us rid of TZ info?
text = text[:self.MAX_LENGTH]
if not self.validate(text):
return None
text = text.replace(' ', 'T')
# fix up dates like 2017-1-5 into 2017-01-05
if not self.DATE_FULL.match(text):
parts = text.split('T', 1)
date = [p.zfill(2) for p in parts[0].split('-')]
parts[0] = '-'.join(date)
text = 'T'.join(parts)
text = text[:self.MAX_LENGTH]
# strip -00-00 from dates because it makes ES barf.
text = self.CUT_ZEROES.sub('', text)
return text
def clean(self, text, guess=True, format=None, **kwargs):
"""The classic: date parsing, every which way."""
# handle date/datetime before converting to text.
date = self._clean_datetime(text)
if date is not None:
return date
text = stringify(text)
if text is None:
return
if format is not None:
# parse with a specified format
try:
obj = datetime.strptime(text, format)
return obj.date().isoformat()
except Exception:
return None
if guess and not self.validate(text):
# use dateparser to guess the format
obj = self.fuzzy_date_parser(text)
if obj is not None:
return obj.date().isoformat()
return self._clean_text(text)
def fuzzy_date_parser(self, text):
"""Thin wrapper around ``parsedatetime`` and ``dateutil`` modules.
Since there's no upstream suppport for multiple locales, this wrapper
exists.
:param str text: Text to parse.
:returns: A parsed date/time object. Raises exception on failure.
:rtype: datetime
"""
try:
parsed = dateparser.parse(text, dayfirst=True)
return parsed
except (ValueError, TypeError):
locales = parsedatetime._locales[:]
# Loop through all the locales and try to parse successfully our
# string
for locale in locales:
const = parsedatetime.Constants(locale)
const.re_option += re.UNICODE
parser = parsedatetime.Calendar(const)
parsed, ok = parser.parse(text)
if ok:
return datetime(*parsed[:6])
|
occrp-attic/exactitude | exactitude/date.py | DateType._clean_datetime | python | def _clean_datetime(self, obj):
if isinstance(obj, datetime):
# if it's not naive, put it on zulu time first:
if obj.tzinfo is not None:
obj = obj.astimezone(pytz.utc)
return obj.isoformat()[:self.MAX_LENGTH]
if isinstance(obj, date):
return obj.isoformat() | Python objects want to be text. | train | https://github.com/occrp-attic/exactitude/blob/9fe13aa70f1aac644dbc999e0b21683db507f02d/exactitude/date.py#L25-L33 | null | class DateType(ExactitudeType):
# JS: '^([12]\\d{3}(-[01]?[1-9](-[0123]?[1-9])?)?)?$'
DATE_RE = re.compile('^([12]\d{3}(-[01]?[0-9](-[0123]?[0-9]([T ]([012]?\d(:\d{1,2}(:\d{1,2}(\.\d{6})?(Z|[-+]\d{2}(:?\d{2})?)?)?)?)?)?)?)?)?$') # noqa
DATE_FULL = re.compile('\d{4}-\d{2}-\d{2}.*')
CUT_ZEROES = re.compile(r'((\-00.*)|(.00:00:00))$')
MAX_LENGTH = 19
def validate(self, obj, **kwargs):
"""Check if a thing is a valid date."""
obj = stringify(obj)
if obj is None:
return False
return self.DATE_RE.match(obj) is not None
def _clean_text(self, text):
# limit to the date part of a presumed date string
# FIXME: this may get us rid of TZ info?
text = text[:self.MAX_LENGTH]
if not self.validate(text):
return None
text = text.replace(' ', 'T')
# fix up dates like 2017-1-5 into 2017-01-05
if not self.DATE_FULL.match(text):
parts = text.split('T', 1)
date = [p.zfill(2) for p in parts[0].split('-')]
parts[0] = '-'.join(date)
text = 'T'.join(parts)
text = text[:self.MAX_LENGTH]
# strip -00-00 from dates because it makes ES barf.
text = self.CUT_ZEROES.sub('', text)
return text
def clean(self, text, guess=True, format=None, **kwargs):
"""The classic: date parsing, every which way."""
# handle date/datetime before converting to text.
date = self._clean_datetime(text)
if date is not None:
return date
text = stringify(text)
if text is None:
return
if format is not None:
# parse with a specified format
try:
obj = datetime.strptime(text, format)
return obj.date().isoformat()
except Exception:
return None
if guess and not self.validate(text):
# use dateparser to guess the format
obj = self.fuzzy_date_parser(text)
if obj is not None:
return obj.date().isoformat()
return self._clean_text(text)
def fuzzy_date_parser(self, text):
"""Thin wrapper around ``parsedatetime`` and ``dateutil`` modules.
Since there's no upstream suppport for multiple locales, this wrapper
exists.
:param str text: Text to parse.
:returns: A parsed date/time object. Raises exception on failure.
:rtype: datetime
"""
try:
parsed = dateparser.parse(text, dayfirst=True)
return parsed
except (ValueError, TypeError):
locales = parsedatetime._locales[:]
# Loop through all the locales and try to parse successfully our
# string
for locale in locales:
const = parsedatetime.Constants(locale)
const.re_option += re.UNICODE
parser = parsedatetime.Calendar(const)
parsed, ok = parser.parse(text)
if ok:
return datetime(*parsed[:6])
|
occrp-attic/exactitude | exactitude/date.py | DateType.clean | python | def clean(self, text, guess=True, format=None, **kwargs):
# handle date/datetime before converting to text.
date = self._clean_datetime(text)
if date is not None:
return date
text = stringify(text)
if text is None:
return
if format is not None:
# parse with a specified format
try:
obj = datetime.strptime(text, format)
return obj.date().isoformat()
except Exception:
return None
if guess and not self.validate(text):
# use dateparser to guess the format
obj = self.fuzzy_date_parser(text)
if obj is not None:
return obj.date().isoformat()
return self._clean_text(text) | The classic: date parsing, every which way. | train | https://github.com/occrp-attic/exactitude/blob/9fe13aa70f1aac644dbc999e0b21683db507f02d/exactitude/date.py#L53-L78 | [
"def validate(self, obj, **kwargs):\n \"\"\"Check if a thing is a valid date.\"\"\"\n obj = stringify(obj)\n if obj is None:\n return False\n return self.DATE_RE.match(obj) is not None\n",
"def _clean_datetime(self, obj):\n \"\"\"Python objects want to be text.\"\"\"\n if isinstance(obj, ... | class DateType(ExactitudeType):
# JS: '^([12]\\d{3}(-[01]?[1-9](-[0123]?[1-9])?)?)?$'
DATE_RE = re.compile('^([12]\d{3}(-[01]?[0-9](-[0123]?[0-9]([T ]([012]?\d(:\d{1,2}(:\d{1,2}(\.\d{6})?(Z|[-+]\d{2}(:?\d{2})?)?)?)?)?)?)?)?)?$') # noqa
DATE_FULL = re.compile('\d{4}-\d{2}-\d{2}.*')
CUT_ZEROES = re.compile(r'((\-00.*)|(.00:00:00))$')
MAX_LENGTH = 19
def validate(self, obj, **kwargs):
"""Check if a thing is a valid date."""
obj = stringify(obj)
if obj is None:
return False
return self.DATE_RE.match(obj) is not None
def _clean_datetime(self, obj):
"""Python objects want to be text."""
if isinstance(obj, datetime):
# if it's not naive, put it on zulu time first:
if obj.tzinfo is not None:
obj = obj.astimezone(pytz.utc)
return obj.isoformat()[:self.MAX_LENGTH]
if isinstance(obj, date):
return obj.isoformat()
def _clean_text(self, text):
# limit to the date part of a presumed date string
# FIXME: this may get us rid of TZ info?
text = text[:self.MAX_LENGTH]
if not self.validate(text):
return None
text = text.replace(' ', 'T')
# fix up dates like 2017-1-5 into 2017-01-05
if not self.DATE_FULL.match(text):
parts = text.split('T', 1)
date = [p.zfill(2) for p in parts[0].split('-')]
parts[0] = '-'.join(date)
text = 'T'.join(parts)
text = text[:self.MAX_LENGTH]
# strip -00-00 from dates because it makes ES barf.
text = self.CUT_ZEROES.sub('', text)
return text
def fuzzy_date_parser(self, text):
"""Thin wrapper around ``parsedatetime`` and ``dateutil`` modules.
Since there's no upstream suppport for multiple locales, this wrapper
exists.
:param str text: Text to parse.
:returns: A parsed date/time object. Raises exception on failure.
:rtype: datetime
"""
try:
parsed = dateparser.parse(text, dayfirst=True)
return parsed
except (ValueError, TypeError):
locales = parsedatetime._locales[:]
# Loop through all the locales and try to parse successfully our
# string
for locale in locales:
const = parsedatetime.Constants(locale)
const.re_option += re.UNICODE
parser = parsedatetime.Calendar(const)
parsed, ok = parser.parse(text)
if ok:
return datetime(*parsed[:6])
|
occrp-attic/exactitude | exactitude/date.py | DateType.fuzzy_date_parser | python | def fuzzy_date_parser(self, text):
try:
parsed = dateparser.parse(text, dayfirst=True)
return parsed
except (ValueError, TypeError):
locales = parsedatetime._locales[:]
# Loop through all the locales and try to parse successfully our
# string
for locale in locales:
const = parsedatetime.Constants(locale)
const.re_option += re.UNICODE
parser = parsedatetime.Calendar(const)
parsed, ok = parser.parse(text)
if ok:
return datetime(*parsed[:6]) | Thin wrapper around ``parsedatetime`` and ``dateutil`` modules.
Since there's no upstream suppport for multiple locales, this wrapper
exists.
:param str text: Text to parse.
:returns: A parsed date/time object. Raises exception on failure.
:rtype: datetime | train | https://github.com/occrp-attic/exactitude/blob/9fe13aa70f1aac644dbc999e0b21683db507f02d/exactitude/date.py#L80-L101 | null | class DateType(ExactitudeType):
# JS: '^([12]\\d{3}(-[01]?[1-9](-[0123]?[1-9])?)?)?$'
DATE_RE = re.compile('^([12]\d{3}(-[01]?[0-9](-[0123]?[0-9]([T ]([012]?\d(:\d{1,2}(:\d{1,2}(\.\d{6})?(Z|[-+]\d{2}(:?\d{2})?)?)?)?)?)?)?)?)?$') # noqa
DATE_FULL = re.compile('\d{4}-\d{2}-\d{2}.*')
CUT_ZEROES = re.compile(r'((\-00.*)|(.00:00:00))$')
MAX_LENGTH = 19
def validate(self, obj, **kwargs):
"""Check if a thing is a valid date."""
obj = stringify(obj)
if obj is None:
return False
return self.DATE_RE.match(obj) is not None
def _clean_datetime(self, obj):
"""Python objects want to be text."""
if isinstance(obj, datetime):
# if it's not naive, put it on zulu time first:
if obj.tzinfo is not None:
obj = obj.astimezone(pytz.utc)
return obj.isoformat()[:self.MAX_LENGTH]
if isinstance(obj, date):
return obj.isoformat()
def _clean_text(self, text):
# limit to the date part of a presumed date string
# FIXME: this may get us rid of TZ info?
text = text[:self.MAX_LENGTH]
if not self.validate(text):
return None
text = text.replace(' ', 'T')
# fix up dates like 2017-1-5 into 2017-01-05
if not self.DATE_FULL.match(text):
parts = text.split('T', 1)
date = [p.zfill(2) for p in parts[0].split('-')]
parts[0] = '-'.join(date)
text = 'T'.join(parts)
text = text[:self.MAX_LENGTH]
# strip -00-00 from dates because it makes ES barf.
text = self.CUT_ZEROES.sub('', text)
return text
def clean(self, text, guess=True, format=None, **kwargs):
"""The classic: date parsing, every which way."""
# handle date/datetime before converting to text.
date = self._clean_datetime(text)
if date is not None:
return date
text = stringify(text)
if text is None:
return
if format is not None:
# parse with a specified format
try:
obj = datetime.strptime(text, format)
return obj.date().isoformat()
except Exception:
return None
if guess and not self.validate(text):
# use dateparser to guess the format
obj = self.fuzzy_date_parser(text)
if obj is not None:
return obj.date().isoformat()
return self._clean_text(text)
|
occrp-attic/exactitude | exactitude/phone.py | PhoneType.clean_text | python | def clean_text(self, number, countries=None, country=None, **kwargs):
for code in self._clean_countries(countries, country):
try:
num = parse_number(number, code)
if is_possible_number(num):
if is_valid_number(num):
return format_number(num, PhoneNumberFormat.E164)
except NumberParseException:
pass | Parse a phone number and return in international format.
If no valid phone number can be detected, None is returned. If
a country code is supplied, this will be used to infer the
prefix.
https://github.com/daviddrysdale/python-phonenumbers | train | https://github.com/occrp-attic/exactitude/blob/9fe13aa70f1aac644dbc999e0b21683db507f02d/exactitude/phone.py#L23-L39 | [
"def _clean_countries(self, countries, country):\n result = set([None])\n countries = ensure_list(countries)\n countries.extend(ensure_list(country))\n for country in countries:\n if isinstance(country, six.string_types):\n country = country.strip().upper()\n result.add(coun... | class PhoneType(ExactitudeType):
def _clean_countries(self, countries, country):
result = set([None])
countries = ensure_list(countries)
countries.extend(ensure_list(country))
for country in countries:
if isinstance(country, six.string_types):
country = country.strip().upper()
result.add(country)
return result
|
occrp-attic/exactitude | exactitude/ip.py | IpType.validate | python | def validate(self, ip, **kwargs):
if ip is None:
return False
ip = stringify(ip)
if self.IPV4_REGEX.match(ip):
try:
socket.inet_pton(socket.AF_INET, ip)
return True
except AttributeError: # no inet_pton here, sorry
try:
socket.inet_aton(ip)
except socket.error:
return False
return ip.count('.') == 3
except socket.error: # not a valid address
return False
if self.IPV6_REGEX.match(ip):
try:
socket.inet_pton(socket.AF_INET6, ip)
except socket.error: # not a valid address
return False
return True | Check to see if this is a valid ip address. | train | https://github.com/occrp-attic/exactitude/blob/9fe13aa70f1aac644dbc999e0b21683db507f02d/exactitude/ip.py#L15-L42 | null | class IpType(ExactitudeType):
IPV4_REGEX = re.compile(r'(([2][5][0-5]\.)|([2][0-4][0-9]\.)|([0-1]?[0-9]?[0-9]\.)){3}'+'(([2][5][0-5])|([2][0-4][0-9])|([0-1]?[0-9]?[0-9]))')
IPV6_REGEX = re.compile(r'(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))')
def validate(self, ip, **kwargs):
"""Check to see if this is a valid ip address."""
if ip is None:
return False
ip = stringify(ip)
if self.IPV4_REGEX.match(ip):
try:
socket.inet_pton(socket.AF_INET, ip)
return True
except AttributeError: # no inet_pton here, sorry
try:
socket.inet_aton(ip)
except socket.error:
return False
return ip.count('.') == 3
except socket.error: # not a valid address
return False
if self.IPV6_REGEX.match(ip):
try:
socket.inet_pton(socket.AF_INET6, ip)
except socket.error: # not a valid address
return False
return True
def clean(self, text, **kwargs):
"""Create a more clean, but still user-facing version of an
instance of the type."""
text = stringify(text)
if text is not None:
return text
|
occrp-attic/exactitude | exactitude/address.py | AddressType.clean_text | python | def clean_text(self, address, **kwargs):
address = self.LINE_BREAKS.sub(', ', address)
address = self.COMMATA.sub(', ', address)
address = collapse_spaces(address)
if len(address):
return address | Basic clean-up. | train | https://github.com/occrp-attic/exactitude/blob/9fe13aa70f1aac644dbc999e0b21683db507f02d/exactitude/address.py#L11-L17 | null | class AddressType(ExactitudeType):
LINE_BREAKS = re.compile(r'(\r\n|\n|<BR/>|<BR>|\t|ESQ\.,|ESQ,|;)')
COMMATA = re.compile(r'(,\s?[,\.])')
def normalize(self, address, **kwargs):
"""Make the address more compareable."""
# TODO: normalize well-known parts like "Street", "Road", etc.
# TODO: consider using https://github.com/openvenues/pypostal
addresses = super(AddressType, self).normalize(address, **kwargs)
return addresses
|
occrp-attic/exactitude | exactitude/address.py | AddressType.normalize | python | def normalize(self, address, **kwargs):
# TODO: normalize well-known parts like "Street", "Road", etc.
# TODO: consider using https://github.com/openvenues/pypostal
addresses = super(AddressType, self).normalize(address, **kwargs)
return addresses | Make the address more compareable. | train | https://github.com/occrp-attic/exactitude/blob/9fe13aa70f1aac644dbc999e0b21683db507f02d/exactitude/address.py#L19-L24 | [
"def normalize(self, text, cleaned=False, **kwargs):\n \"\"\"Create a represenation ideal for comparisons, but not to be\n shown to the user.\"\"\"\n if not cleaned:\n text = self.clean(text, **kwargs)\n return ensure_list(text)\n"
] | class AddressType(ExactitudeType):
LINE_BREAKS = re.compile(r'(\r\n|\n|<BR/>|<BR>|\t|ESQ\.,|ESQ,|;)')
COMMATA = re.compile(r'(,\s?[,\.])')
def clean_text(self, address, **kwargs):
"""Basic clean-up."""
address = self.LINE_BREAKS.sub(', ', address)
address = self.COMMATA.sub(', ', address)
address = collapse_spaces(address)
if len(address):
return address
|
occrp-attic/exactitude | exactitude/identifier.py | IdentifierType.normalize | python | def normalize(self, text, **kwargs):
identifiers = []
for ident in super(IdentifierType, self).normalize(text, **kwargs):
identifiers.append(normalize(ident))
return identifiers | Normalize for comparison. | train | https://github.com/occrp-attic/exactitude/blob/9fe13aa70f1aac644dbc999e0b21683db507f02d/exactitude/identifier.py#L9-L14 | [
"def normalize(self, text, cleaned=False, **kwargs):\n \"\"\"Create a represenation ideal for comparisons, but not to be\n shown to the user.\"\"\"\n if not cleaned:\n text = self.clean(text, **kwargs)\n return ensure_list(text)\n"
] | class IdentifierType(ExactitudeType):
"""Used for registration numbers, codes etc."""
|
occrp-attic/exactitude | exactitude/url.py | UrlType.clean_text | python | def clean_text(self, url, **kwargs):
try:
return normalize_url(url)
except UnicodeDecodeError:
log.warning("Invalid URL: %r", url) | Perform intensive care on URLs, see `urlnormalizer`. | train | https://github.com/occrp-attic/exactitude/blob/9fe13aa70f1aac644dbc999e0b21683db507f02d/exactitude/url.py#L15-L20 | null | class UrlType(ExactitudeType):
def validate(self, url, **kwargs):
"""Check if `url` is a valid URL."""
return is_valid_url(url)
|
occrp-attic/exactitude | exactitude/iban.py | IbanType.clean_text | python | def clean_text(self, text, **kwargs):
text = text.replace(" ", "")
text = text.upper()
return text | Create a more clean, but still user-facing version of an
instance of the type. | train | https://github.com/occrp-attic/exactitude/blob/9fe13aa70f1aac644dbc999e0b21683db507f02d/exactitude/iban.py#L21-L26 | null | class IbanType(ExactitudeType):
def validate(self, iban, **kwargs):
iban = stringify(iban)
if iban is None:
return False
try:
return iban_validator.is_valid(iban)
except iban.error: # not a valid iban
return False
|
occrp-attic/exactitude | exactitude/country.py | CountryType.clean_text | python | def clean_text(self, country, guess=False, **kwargs):
code = country.lower().strip()
if code in self.names:
return code
country = countrynames.to_code(country, fuzzy=guess)
if country is not None:
return country.lower() | Determine a two-letter country code based on an input.
The input may be a country code, a country name, etc. | train | https://github.com/occrp-attic/exactitude/blob/9fe13aa70f1aac644dbc999e0b21683db507f02d/exactitude/country.py#L35-L45 | null | class CountryType(ExactitudeType):
def __init__(self, *args):
super(CountryType, self).__init__(*args)
# extra countries that OCCRP is interested in.
self.names = {
'zz': 'Global',
'eu': 'European Union',
'xk': 'Kosovo',
'yucs': 'Yugoslavia',
'csxx': 'Serbia and Montenegro',
'suhh': 'Soviet Union',
'ge-ab': 'Abkhazia',
'x-so': 'South Ossetia',
'so-som': 'Somaliland',
'gb-wls': 'Wales',
'gb-sct': 'Scotland',
'md-pmr': 'Transnistria'
}
for code, label in self.locale.territories.items():
self.names[code.lower()] = label
def validate(self, country, **kwargs):
country = stringify(country)
if country is None:
return False
return country.lower() in self.names
|
occrp-attic/exactitude | exactitude/name.py | NameType.clean_text | python | def clean_text(self, name, **kwargs):
name = strip_quotes(name)
name = collapse_spaces(name)
return name | Basic clean-up. | train | https://github.com/occrp-attic/exactitude/blob/9fe13aa70f1aac644dbc999e0b21683db507f02d/exactitude/name.py#L8-L12 | null | class NameType(ExactitudeType):
|
occrp-attic/exactitude | exactitude/email.py | EmailType.validate | python | def validate(self, email, **kwargs):
email = stringify(email)
if email is None:
return
if not self.EMAIL_REGEX.match(email):
return False
mailbox, domain = email.rsplit('@', 1)
return self.domains.validate(domain, **kwargs) | Check to see if this is a valid email address. | train | https://github.com/occrp-attic/exactitude/blob/9fe13aa70f1aac644dbc999e0b21683db507f02d/exactitude/email.py#L15-L23 | null | class EmailType(ExactitudeType):
EMAIL_REGEX = re.compile(r"[^@]+@[^@]+\.[^@]+")
domains = DomainType()
def clean_text(self, email, **kwargs):
"""Parse and normalize an email address.
Returns None if this is not an email address.
"""
if not self.EMAIL_REGEX.match(email):
return None
email = strip_quotes(email)
mailbox, domain = email.rsplit('@', 1)
domain = self.domains.clean(domain, **kwargs)
if domain is None or mailbox is None:
return
return '@'.join((mailbox, domain))
def normalize(self, email, **kwargs):
"""Normalize for comparison."""
emails = super(EmailType, self).normalize(email, **kwargs)
return [e.lower() for e in emails]
|
occrp-attic/exactitude | exactitude/email.py | EmailType.clean_text | python | def clean_text(self, email, **kwargs):
if not self.EMAIL_REGEX.match(email):
return None
email = strip_quotes(email)
mailbox, domain = email.rsplit('@', 1)
domain = self.domains.clean(domain, **kwargs)
if domain is None or mailbox is None:
return
return '@'.join((mailbox, domain)) | Parse and normalize an email address.
Returns None if this is not an email address. | train | https://github.com/occrp-attic/exactitude/blob/9fe13aa70f1aac644dbc999e0b21683db507f02d/exactitude/email.py#L25-L37 | null | class EmailType(ExactitudeType):
EMAIL_REGEX = re.compile(r"[^@]+@[^@]+\.[^@]+")
domains = DomainType()
def validate(self, email, **kwargs):
"""Check to see if this is a valid email address."""
email = stringify(email)
if email is None:
return
if not self.EMAIL_REGEX.match(email):
return False
mailbox, domain = email.rsplit('@', 1)
return self.domains.validate(domain, **kwargs)
def normalize(self, email, **kwargs):
"""Normalize for comparison."""
emails = super(EmailType, self).normalize(email, **kwargs)
return [e.lower() for e in emails]
|
occrp-attic/exactitude | exactitude/email.py | EmailType.normalize | python | def normalize(self, email, **kwargs):
emails = super(EmailType, self).normalize(email, **kwargs)
return [e.lower() for e in emails] | Normalize for comparison. | train | https://github.com/occrp-attic/exactitude/blob/9fe13aa70f1aac644dbc999e0b21683db507f02d/exactitude/email.py#L39-L42 | [
"def normalize(self, text, cleaned=False, **kwargs):\n \"\"\"Create a represenation ideal for comparisons, but not to be\n shown to the user.\"\"\"\n if not cleaned:\n text = self.clean(text, **kwargs)\n return ensure_list(text)\n"
] | class EmailType(ExactitudeType):
EMAIL_REGEX = re.compile(r"[^@]+@[^@]+\.[^@]+")
domains = DomainType()
def validate(self, email, **kwargs):
"""Check to see if this is a valid email address."""
email = stringify(email)
if email is None:
return
if not self.EMAIL_REGEX.match(email):
return False
mailbox, domain = email.rsplit('@', 1)
return self.domains.validate(domain, **kwargs)
def clean_text(self, email, **kwargs):
"""Parse and normalize an email address.
Returns None if this is not an email address.
"""
if not self.EMAIL_REGEX.match(email):
return None
email = strip_quotes(email)
mailbox, domain = email.rsplit('@', 1)
domain = self.domains.clean(domain, **kwargs)
if domain is None or mailbox is None:
return
return '@'.join((mailbox, domain))
|
occrp-attic/exactitude | exactitude/common.py | ExactitudeType.validate | python | def validate(self, text, **kwargs):
cleaned = self.clean(text, **kwargs)
return cleaned is not None | Returns a boolean to indicate if this is a valid instance of
the type. | train | https://github.com/occrp-attic/exactitude/blob/9fe13aa70f1aac644dbc999e0b21683db507f02d/exactitude/common.py#L12-L16 | [
"def clean(self, text, **kwargs):\n \"\"\"Create a more clean, but still user-facing version of an\n instance of the type.\"\"\"\n text = stringify(text)\n if text is not None:\n return self.clean_text(text, **kwargs)\n"
] | class ExactitudeType(object):
"""Base class for all types."""
def __init__(self, locale='en_GB'):
self.locale = Locale(locale)
def clean(self, text, **kwargs):
"""Create a more clean, but still user-facing version of an
instance of the type."""
text = stringify(text)
if text is not None:
return self.clean_text(text, **kwargs)
def clean_text(self, text, **kwargs):
return text
def normalize(self, text, cleaned=False, **kwargs):
"""Create a represenation ideal for comparisons, but not to be
shown to the user."""
if not cleaned:
text = self.clean(text, **kwargs)
return ensure_list(text)
def normalize_set(self, items, **kwargs):
"""Utility to normalize a whole set of values and get unique
values."""
values = set()
for item in ensure_list(items):
values.update(self.normalize(item, **kwargs))
return list(values)
|
occrp-attic/exactitude | exactitude/common.py | ExactitudeType.clean | python | def clean(self, text, **kwargs):
text = stringify(text)
if text is not None:
return self.clean_text(text, **kwargs) | Create a more clean, but still user-facing version of an
instance of the type. | train | https://github.com/occrp-attic/exactitude/blob/9fe13aa70f1aac644dbc999e0b21683db507f02d/exactitude/common.py#L18-L23 | [
"def clean_text(self, address, **kwargs):\n \"\"\"Basic clean-up.\"\"\"\n address = self.LINE_BREAKS.sub(', ', address)\n address = self.COMMATA.sub(', ', address)\n address = collapse_spaces(address)\n if len(address):\n return address\n",
"def clean_text(self, text, **kwargs):\n return ... | class ExactitudeType(object):
"""Base class for all types."""
def __init__(self, locale='en_GB'):
self.locale = Locale(locale)
def validate(self, text, **kwargs):
"""Returns a boolean to indicate if this is a valid instance of
the type."""
cleaned = self.clean(text, **kwargs)
return cleaned is not None
def clean_text(self, text, **kwargs):
return text
def normalize(self, text, cleaned=False, **kwargs):
"""Create a represenation ideal for comparisons, but not to be
shown to the user."""
if not cleaned:
text = self.clean(text, **kwargs)
return ensure_list(text)
def normalize_set(self, items, **kwargs):
"""Utility to normalize a whole set of values and get unique
values."""
values = set()
for item in ensure_list(items):
values.update(self.normalize(item, **kwargs))
return list(values)
|
occrp-attic/exactitude | exactitude/common.py | ExactitudeType.normalize | python | def normalize(self, text, cleaned=False, **kwargs):
if not cleaned:
text = self.clean(text, **kwargs)
return ensure_list(text) | Create a represenation ideal for comparisons, but not to be
shown to the user. | train | https://github.com/occrp-attic/exactitude/blob/9fe13aa70f1aac644dbc999e0b21683db507f02d/exactitude/common.py#L28-L33 | [
"def clean(self, text, **kwargs):\n \"\"\"Create a more clean, but still user-facing version of an\n instance of the type.\"\"\"\n text = stringify(text)\n if text is not None:\n return self.clean_text(text, **kwargs)\n"
] | class ExactitudeType(object):
"""Base class for all types."""
def __init__(self, locale='en_GB'):
self.locale = Locale(locale)
def validate(self, text, **kwargs):
"""Returns a boolean to indicate if this is a valid instance of
the type."""
cleaned = self.clean(text, **kwargs)
return cleaned is not None
def clean(self, text, **kwargs):
"""Create a more clean, but still user-facing version of an
instance of the type."""
text = stringify(text)
if text is not None:
return self.clean_text(text, **kwargs)
def clean_text(self, text, **kwargs):
return text
def normalize_set(self, items, **kwargs):
"""Utility to normalize a whole set of values and get unique
values."""
values = set()
for item in ensure_list(items):
values.update(self.normalize(item, **kwargs))
return list(values)
|
occrp-attic/exactitude | exactitude/common.py | ExactitudeType.normalize_set | python | def normalize_set(self, items, **kwargs):
values = set()
for item in ensure_list(items):
values.update(self.normalize(item, **kwargs))
return list(values) | Utility to normalize a whole set of values and get unique
values. | train | https://github.com/occrp-attic/exactitude/blob/9fe13aa70f1aac644dbc999e0b21683db507f02d/exactitude/common.py#L35-L41 | [
"def normalize(self, text, cleaned=False, **kwargs):\n \"\"\"Create a represenation ideal for comparisons, but not to be\n shown to the user.\"\"\"\n if not cleaned:\n text = self.clean(text, **kwargs)\n return ensure_list(text)\n"
] | class ExactitudeType(object):
"""Base class for all types."""
def __init__(self, locale='en_GB'):
self.locale = Locale(locale)
def validate(self, text, **kwargs):
"""Returns a boolean to indicate if this is a valid instance of
the type."""
cleaned = self.clean(text, **kwargs)
return cleaned is not None
def clean(self, text, **kwargs):
"""Create a more clean, but still user-facing version of an
instance of the type."""
text = stringify(text)
if text is not None:
return self.clean_text(text, **kwargs)
def clean_text(self, text, **kwargs):
return text
def normalize(self, text, cleaned=False, **kwargs):
"""Create a represenation ideal for comparisons, but not to be
shown to the user."""
if not cleaned:
text = self.clean(text, **kwargs)
return ensure_list(text)
|
occrp-attic/exactitude | exactitude/domain.py | DomainType.validate | python | def validate(self, obj, **kwargs):
text = stringify(obj)
if text is None:
return False
if '.' not in text:
return False
if '@' in text or ':' in text:
return False
if len(text) < 4:
return False
return True | Check if a thing is a valid domain name. | train | https://github.com/occrp-attic/exactitude/blob/9fe13aa70f1aac644dbc999e0b21683db507f02d/exactitude/domain.py#L20-L31 | null | class DomainType(ExactitudeType):
# TODO: https://pypi.python.org/pypi/publicsuffix/
# def _check_exists(self, domain):
# """Actually try to resolve a domain name."""
# try:
# domain = domain.encode('idna').lower()
# socket.getaddrinfo(domain, None)
# return True
# except:
# return False
def clean_text(self, domain, **kwargs):
"""Try to extract only the domain bit from the """
try:
# handle URLs by extracting the domain name
domain = urlparse(domain).hostname or domain
domain = domain.lower()
# get rid of port specs
domain = domain.rsplit(':', 1)[0]
domain = domain.rstrip('.')
# handle unicode
domain = domain.encode("idna").decode('ascii')
except ValueError:
return None
if self.validate(domain):
return domain
|
occrp-attic/exactitude | exactitude/domain.py | DomainType.clean_text | python | def clean_text(self, domain, **kwargs):
try:
# handle URLs by extracting the domain name
domain = urlparse(domain).hostname or domain
domain = domain.lower()
# get rid of port specs
domain = domain.rsplit(':', 1)[0]
domain = domain.rstrip('.')
# handle unicode
domain = domain.encode("idna").decode('ascii')
except ValueError:
return None
if self.validate(domain):
return domain | Try to extract only the domain bit from the | train | https://github.com/occrp-attic/exactitude/blob/9fe13aa70f1aac644dbc999e0b21683db507f02d/exactitude/domain.py#L33-L47 | [
"def validate(self, obj, **kwargs):\n \"\"\"Check if a thing is a valid domain name.\"\"\"\n text = stringify(obj)\n if text is None:\n return False\n if '.' not in text:\n return False\n if '@' in text or ':' in text:\n return False\n if len(text) < 4:\n return False\n... | class DomainType(ExactitudeType):
# TODO: https://pypi.python.org/pypi/publicsuffix/
# def _check_exists(self, domain):
# """Actually try to resolve a domain name."""
# try:
# domain = domain.encode('idna').lower()
# socket.getaddrinfo(domain, None)
# return True
# except:
# return False
def validate(self, obj, **kwargs):
"""Check if a thing is a valid domain name."""
text = stringify(obj)
if text is None:
return False
if '.' not in text:
return False
if '@' in text or ':' in text:
return False
if len(text) < 4:
return False
return True
|
sk-/git-lint | gitlint/__init__.py | find_invalid_filenames | python | def find_invalid_filenames(filenames, repository_root):
errors = []
for filename in filenames:
if not os.path.abspath(filename).startswith(repository_root):
errors.append((filename, 'Error: File %s does not belong to '
'repository %s' % (filename, repository_root)))
if not os.path.exists(filename):
errors.append((filename,
'Error: File %s does not exist' % (filename, )))
if os.path.isdir(filename):
errors.append((filename,
'Error: %s is a directory. Directories are'
' not yet supported' % (filename, )))
return errors | Find files that does not exist, are not in the repo or are directories.
Args:
filenames: list of filenames to check
repository_root: the absolute path of the repository's root.
Returns: A list of errors. | train | https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/gitlint/__init__.py#L65-L87 | null | # Copyright 2013-2014 Sebastian Kreft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
git-lint: improving source code one step at a time
Lints all the modified files in your git repository showing only the modified
lines.
It supports many filetypes, including:
PHP, Python, Javascript, Ruby, CSS, SCSS, PNG, JPEG, RST, YAML, INI, Java,
among others. See https://github.com/sk-/git-lint for the complete list.
Usage:
git-lint [-f | --force] [--json] [--last-commit] [FILENAME ...]
git-lint [-t | --tracked] [-f | --force] [--json] [--last-commit]
git-lint -h | --version
Options:
-h Show the usage patterns.
--version Prints the version number.
-f --force Shows all the lines with problems.
-t --tracked Lints only tracked files.
--json Prints the result as a json string. Useful to use it in
conjunction with other tools.
--last-commit Checks the last checked-out commit. This is mostly useful
when used as: git checkout <revid>; git lint --last-commit.
"""
from __future__ import unicode_literals
import codecs
import functools
import json
import multiprocessing
import os
import os.path
import sys
from concurrent import futures
import docopt
import termcolor
import yaml
import gitlint.git as git
import gitlint.hg as hg
import gitlint.linters as linters
from gitlint.version import __VERSION__
ERROR = termcolor.colored('ERROR', 'red', attrs=('bold', ))
SKIPPED = termcolor.colored('SKIPPED', 'yellow', attrs=('bold', ))
OK = termcolor.colored('OK', 'green', attrs=('bold', ))
def get_config(repo_root):
"""Gets the configuration file either from the repository or the default."""
config = os.path.join(os.path.dirname(__file__), 'configs', 'config.yaml')
if repo_root:
repo_config = os.path.join(repo_root, '.gitlint.yaml')
if os.path.exists(repo_config):
config = repo_config
with open(config) as f:
# We have to read the content first as yaml hangs up when reading from
# MockOpen
content = f.read()
# Yaml.load will return None when the input is empty.
if not content:
yaml_config = {}
else:
yaml_config = yaml.load(content)
return linters.parse_yaml_config(yaml_config, repo_root)
def format_comment(comment_data):
"""Formats the data returned by the linters.
Given a dictionary with the fields: line, column, severity, message_id,
message, will generate a message like:
'line {line}, col {column}: {severity}: [{message_id}]: {message}'
Any of the fields may nbe absent.
Args:
comment_data: dictionary with the linter data.
Returns:
a string with the formatted message.
"""
format_pieces = []
# Line and column information
if 'line' in comment_data:
format_pieces.append('line {line}')
if 'column' in comment_data:
if format_pieces:
format_pieces.append(', ')
format_pieces.append('col {column}')
if format_pieces:
format_pieces.append(': ')
# Severity and Id information
if 'severity' in comment_data:
format_pieces.append('{severity}: ')
if 'message_id' in comment_data:
format_pieces.append('[{message_id}]: ')
# The message
if 'message' in comment_data:
format_pieces.append('{message}')
return ''.join(format_pieces).format(**comment_data)
def get_vcs_root():
"""Returns the vcs module and the root of the repo.
Returns:
A tuple containing the vcs module to use (git, hg) and the root of the
repository. If no repository exisits then (None, None) is returned.
"""
for vcs in (git, hg):
repo_root = vcs.repository_root()
if repo_root:
return vcs, repo_root
return (None, None)
def process_file(vcs, commit, force, gitlint_config, file_data):
"""Lint the file
Returns:
The results from the linter.
"""
filename, extra_data = file_data
if force:
modified_lines = None
else:
modified_lines = vcs.modified_lines(
filename, extra_data, commit=commit)
result = linters.lint(filename, modified_lines, gitlint_config)
result = result[filename]
return filename, result
def main(argv, stdout=sys.stdout, stderr=sys.stderr):
"""Main gitlint routine. To be called from scripts."""
# Wrap sys stdout for python 2, so print can understand unicode.
linesep = os.linesep
if sys.version_info[0] < 3:
if stdout == sys.stdout:
stdout = codecs.getwriter("utf-8")(stdout)
if stderr == sys.stderr:
stderr = codecs.getwriter("utf-8")(stderr)
linesep = unicode(os.linesep) # pylint: disable=undefined-variable
arguments = docopt.docopt(
__doc__, argv=argv[1:], version='git-lint v%s' % __VERSION__)
json_output = arguments['--json']
vcs, repository_root = get_vcs_root()
if vcs is None:
stderr.write('fatal: Not a git repository' + linesep)
return 128
commit = None
if arguments['--last-commit']:
commit = vcs.last_commit()
if arguments['FILENAME']:
invalid_filenames = find_invalid_filenames(arguments['FILENAME'],
repository_root)
if invalid_filenames:
invalid_filenames.append(('', ''))
stderr.write(
linesep.join(invalid[1] for invalid in invalid_filenames))
return 2
changed_files = vcs.modified_files(
repository_root,
tracked_only=arguments['--tracked'],
commit=commit)
modified_files = {}
for filename in arguments['FILENAME']:
normalized_filename = os.path.abspath(filename)
modified_files[normalized_filename] = changed_files.get(
normalized_filename)
else:
modified_files = vcs.modified_files(
repository_root,
tracked_only=arguments['--tracked'],
commit=commit)
linter_not_found = False
files_with_problems = 0
gitlint_config = get_config(repository_root)
json_result = {}
with futures.ThreadPoolExecutor(max_workers=multiprocessing.cpu_count())\
as executor:
processfile = functools.partial(process_file, vcs, commit,
arguments['--force'], gitlint_config)
for filename, result in executor.map(
processfile, [(filename, modified_files[filename])
for filename in sorted(modified_files.keys())]):
rel_filename = os.path.relpath(filename)
if not json_output:
stdout.write('Linting file: %s%s' % (termcolor.colored(
rel_filename, attrs=('bold', )), linesep))
output_lines = []
if result.get('error'):
output_lines.extend('%s: %s' % (ERROR, reason)
for reason in result.get('error'))
linter_not_found = True
if result.get('skipped'):
output_lines.extend('%s: %s' % (SKIPPED, reason)
for reason in result.get('skipped'))
if not result.get('comments', []):
if not output_lines:
output_lines.append(OK)
else:
files_with_problems += 1
for data in result['comments']:
formatted_message = format_comment(data)
output_lines.append(formatted_message)
data['formatted_message'] = formatted_message
if json_output:
json_result[filename] = result
else:
output = linesep.join(output_lines)
stdout.write(output)
stdout.write(linesep + linesep)
if json_output:
# Hack to convert to unicode, Python3 returns unicode, wheres Python2
# returns str.
stdout.write(
json.dumps(json_result,
ensure_ascii=False).encode('utf-8').decode('utf-8'))
if files_with_problems > 0:
return 1
if linter_not_found:
return 4
return 0
|
sk-/git-lint | gitlint/__init__.py | get_config | python | def get_config(repo_root):
config = os.path.join(os.path.dirname(__file__), 'configs', 'config.yaml')
if repo_root:
repo_config = os.path.join(repo_root, '.gitlint.yaml')
if os.path.exists(repo_config):
config = repo_config
with open(config) as f:
# We have to read the content first as yaml hangs up when reading from
# MockOpen
content = f.read()
# Yaml.load will return None when the input is empty.
if not content:
yaml_config = {}
else:
yaml_config = yaml.load(content)
return linters.parse_yaml_config(yaml_config, repo_root) | Gets the configuration file either from the repository or the default. | train | https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/gitlint/__init__.py#L90-L109 | [
"def parse_yaml_config(yaml_config, repo_home):\n \"\"\"Converts a dictionary (parsed Yaml) to the internal representation.\"\"\"\n config = collections.defaultdict(list)\n\n variables = {\n 'DEFAULT_CONFIGS': os.path.join(os.path.dirname(__file__), 'configs'),\n 'REPO_HOME': repo_home,\n ... | # Copyright 2013-2014 Sebastian Kreft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
git-lint: improving source code one step at a time
Lints all the modified files in your git repository showing only the modified
lines.
It supports many filetypes, including:
PHP, Python, Javascript, Ruby, CSS, SCSS, PNG, JPEG, RST, YAML, INI, Java,
among others. See https://github.com/sk-/git-lint for the complete list.
Usage:
git-lint [-f | --force] [--json] [--last-commit] [FILENAME ...]
git-lint [-t | --tracked] [-f | --force] [--json] [--last-commit]
git-lint -h | --version
Options:
-h Show the usage patterns.
--version Prints the version number.
-f --force Shows all the lines with problems.
-t --tracked Lints only tracked files.
--json Prints the result as a json string. Useful to use it in
conjunction with other tools.
--last-commit Checks the last checked-out commit. This is mostly useful
when used as: git checkout <revid>; git lint --last-commit.
"""
from __future__ import unicode_literals
import codecs
import functools
import json
import multiprocessing
import os
import os.path
import sys
from concurrent import futures
import docopt
import termcolor
import yaml
import gitlint.git as git
import gitlint.hg as hg
import gitlint.linters as linters
from gitlint.version import __VERSION__
ERROR = termcolor.colored('ERROR', 'red', attrs=('bold', ))
SKIPPED = termcolor.colored('SKIPPED', 'yellow', attrs=('bold', ))
OK = termcolor.colored('OK', 'green', attrs=('bold', ))
def find_invalid_filenames(filenames, repository_root):
"""Find files that does not exist, are not in the repo or are directories.
Args:
filenames: list of filenames to check
repository_root: the absolute path of the repository's root.
Returns: A list of errors.
"""
errors = []
for filename in filenames:
if not os.path.abspath(filename).startswith(repository_root):
errors.append((filename, 'Error: File %s does not belong to '
'repository %s' % (filename, repository_root)))
if not os.path.exists(filename):
errors.append((filename,
'Error: File %s does not exist' % (filename, )))
if os.path.isdir(filename):
errors.append((filename,
'Error: %s is a directory. Directories are'
' not yet supported' % (filename, )))
return errors
def format_comment(comment_data):
"""Formats the data returned by the linters.
Given a dictionary with the fields: line, column, severity, message_id,
message, will generate a message like:
'line {line}, col {column}: {severity}: [{message_id}]: {message}'
Any of the fields may nbe absent.
Args:
comment_data: dictionary with the linter data.
Returns:
a string with the formatted message.
"""
format_pieces = []
# Line and column information
if 'line' in comment_data:
format_pieces.append('line {line}')
if 'column' in comment_data:
if format_pieces:
format_pieces.append(', ')
format_pieces.append('col {column}')
if format_pieces:
format_pieces.append(': ')
# Severity and Id information
if 'severity' in comment_data:
format_pieces.append('{severity}: ')
if 'message_id' in comment_data:
format_pieces.append('[{message_id}]: ')
# The message
if 'message' in comment_data:
format_pieces.append('{message}')
return ''.join(format_pieces).format(**comment_data)
def get_vcs_root():
"""Returns the vcs module and the root of the repo.
Returns:
A tuple containing the vcs module to use (git, hg) and the root of the
repository. If no repository exisits then (None, None) is returned.
"""
for vcs in (git, hg):
repo_root = vcs.repository_root()
if repo_root:
return vcs, repo_root
return (None, None)
def process_file(vcs, commit, force, gitlint_config, file_data):
"""Lint the file
Returns:
The results from the linter.
"""
filename, extra_data = file_data
if force:
modified_lines = None
else:
modified_lines = vcs.modified_lines(
filename, extra_data, commit=commit)
result = linters.lint(filename, modified_lines, gitlint_config)
result = result[filename]
return filename, result
def main(argv, stdout=sys.stdout, stderr=sys.stderr):
"""Main gitlint routine. To be called from scripts."""
# Wrap sys stdout for python 2, so print can understand unicode.
linesep = os.linesep
if sys.version_info[0] < 3:
if stdout == sys.stdout:
stdout = codecs.getwriter("utf-8")(stdout)
if stderr == sys.stderr:
stderr = codecs.getwriter("utf-8")(stderr)
linesep = unicode(os.linesep) # pylint: disable=undefined-variable
arguments = docopt.docopt(
__doc__, argv=argv[1:], version='git-lint v%s' % __VERSION__)
json_output = arguments['--json']
vcs, repository_root = get_vcs_root()
if vcs is None:
stderr.write('fatal: Not a git repository' + linesep)
return 128
commit = None
if arguments['--last-commit']:
commit = vcs.last_commit()
if arguments['FILENAME']:
invalid_filenames = find_invalid_filenames(arguments['FILENAME'],
repository_root)
if invalid_filenames:
invalid_filenames.append(('', ''))
stderr.write(
linesep.join(invalid[1] for invalid in invalid_filenames))
return 2
changed_files = vcs.modified_files(
repository_root,
tracked_only=arguments['--tracked'],
commit=commit)
modified_files = {}
for filename in arguments['FILENAME']:
normalized_filename = os.path.abspath(filename)
modified_files[normalized_filename] = changed_files.get(
normalized_filename)
else:
modified_files = vcs.modified_files(
repository_root,
tracked_only=arguments['--tracked'],
commit=commit)
linter_not_found = False
files_with_problems = 0
gitlint_config = get_config(repository_root)
json_result = {}
with futures.ThreadPoolExecutor(max_workers=multiprocessing.cpu_count())\
as executor:
processfile = functools.partial(process_file, vcs, commit,
arguments['--force'], gitlint_config)
for filename, result in executor.map(
processfile, [(filename, modified_files[filename])
for filename in sorted(modified_files.keys())]):
rel_filename = os.path.relpath(filename)
if not json_output:
stdout.write('Linting file: %s%s' % (termcolor.colored(
rel_filename, attrs=('bold', )), linesep))
output_lines = []
if result.get('error'):
output_lines.extend('%s: %s' % (ERROR, reason)
for reason in result.get('error'))
linter_not_found = True
if result.get('skipped'):
output_lines.extend('%s: %s' % (SKIPPED, reason)
for reason in result.get('skipped'))
if not result.get('comments', []):
if not output_lines:
output_lines.append(OK)
else:
files_with_problems += 1
for data in result['comments']:
formatted_message = format_comment(data)
output_lines.append(formatted_message)
data['formatted_message'] = formatted_message
if json_output:
json_result[filename] = result
else:
output = linesep.join(output_lines)
stdout.write(output)
stdout.write(linesep + linesep)
if json_output:
# Hack to convert to unicode, Python3 returns unicode, wheres Python2
# returns str.
stdout.write(
json.dumps(json_result,
ensure_ascii=False).encode('utf-8').decode('utf-8'))
if files_with_problems > 0:
return 1
if linter_not_found:
return 4
return 0
|
sk-/git-lint | gitlint/__init__.py | format_comment | python | def format_comment(comment_data):
format_pieces = []
# Line and column information
if 'line' in comment_data:
format_pieces.append('line {line}')
if 'column' in comment_data:
if format_pieces:
format_pieces.append(', ')
format_pieces.append('col {column}')
if format_pieces:
format_pieces.append(': ')
# Severity and Id information
if 'severity' in comment_data:
format_pieces.append('{severity}: ')
if 'message_id' in comment_data:
format_pieces.append('[{message_id}]: ')
# The message
if 'message' in comment_data:
format_pieces.append('{message}')
return ''.join(format_pieces).format(**comment_data) | Formats the data returned by the linters.
Given a dictionary with the fields: line, column, severity, message_id,
message, will generate a message like:
'line {line}, col {column}: {severity}: [{message_id}]: {message}'
Any of the fields may nbe absent.
Args:
comment_data: dictionary with the linter data.
Returns:
a string with the formatted message. | train | https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/gitlint/__init__.py#L112-L150 | null | # Copyright 2013-2014 Sebastian Kreft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
git-lint: improving source code one step at a time
Lints all the modified files in your git repository showing only the modified
lines.
It supports many filetypes, including:
PHP, Python, Javascript, Ruby, CSS, SCSS, PNG, JPEG, RST, YAML, INI, Java,
among others. See https://github.com/sk-/git-lint for the complete list.
Usage:
git-lint [-f | --force] [--json] [--last-commit] [FILENAME ...]
git-lint [-t | --tracked] [-f | --force] [--json] [--last-commit]
git-lint -h | --version
Options:
-h Show the usage patterns.
--version Prints the version number.
-f --force Shows all the lines with problems.
-t --tracked Lints only tracked files.
--json Prints the result as a json string. Useful to use it in
conjunction with other tools.
--last-commit Checks the last checked-out commit. This is mostly useful
when used as: git checkout <revid>; git lint --last-commit.
"""
from __future__ import unicode_literals
import codecs
import functools
import json
import multiprocessing
import os
import os.path
import sys
from concurrent import futures
import docopt
import termcolor
import yaml
import gitlint.git as git
import gitlint.hg as hg
import gitlint.linters as linters
from gitlint.version import __VERSION__
ERROR = termcolor.colored('ERROR', 'red', attrs=('bold', ))
SKIPPED = termcolor.colored('SKIPPED', 'yellow', attrs=('bold', ))
OK = termcolor.colored('OK', 'green', attrs=('bold', ))
def find_invalid_filenames(filenames, repository_root):
"""Find files that does not exist, are not in the repo or are directories.
Args:
filenames: list of filenames to check
repository_root: the absolute path of the repository's root.
Returns: A list of errors.
"""
errors = []
for filename in filenames:
if not os.path.abspath(filename).startswith(repository_root):
errors.append((filename, 'Error: File %s does not belong to '
'repository %s' % (filename, repository_root)))
if not os.path.exists(filename):
errors.append((filename,
'Error: File %s does not exist' % (filename, )))
if os.path.isdir(filename):
errors.append((filename,
'Error: %s is a directory. Directories are'
' not yet supported' % (filename, )))
return errors
def get_config(repo_root):
"""Gets the configuration file either from the repository or the default."""
config = os.path.join(os.path.dirname(__file__), 'configs', 'config.yaml')
if repo_root:
repo_config = os.path.join(repo_root, '.gitlint.yaml')
if os.path.exists(repo_config):
config = repo_config
with open(config) as f:
# We have to read the content first as yaml hangs up when reading from
# MockOpen
content = f.read()
# Yaml.load will return None when the input is empty.
if not content:
yaml_config = {}
else:
yaml_config = yaml.load(content)
return linters.parse_yaml_config(yaml_config, repo_root)
def get_vcs_root():
"""Returns the vcs module and the root of the repo.
Returns:
A tuple containing the vcs module to use (git, hg) and the root of the
repository. If no repository exisits then (None, None) is returned.
"""
for vcs in (git, hg):
repo_root = vcs.repository_root()
if repo_root:
return vcs, repo_root
return (None, None)
def process_file(vcs, commit, force, gitlint_config, file_data):
"""Lint the file
Returns:
The results from the linter.
"""
filename, extra_data = file_data
if force:
modified_lines = None
else:
modified_lines = vcs.modified_lines(
filename, extra_data, commit=commit)
result = linters.lint(filename, modified_lines, gitlint_config)
result = result[filename]
return filename, result
def main(argv, stdout=sys.stdout, stderr=sys.stderr):
"""Main gitlint routine. To be called from scripts."""
# Wrap sys stdout for python 2, so print can understand unicode.
linesep = os.linesep
if sys.version_info[0] < 3:
if stdout == sys.stdout:
stdout = codecs.getwriter("utf-8")(stdout)
if stderr == sys.stderr:
stderr = codecs.getwriter("utf-8")(stderr)
linesep = unicode(os.linesep) # pylint: disable=undefined-variable
arguments = docopt.docopt(
__doc__, argv=argv[1:], version='git-lint v%s' % __VERSION__)
json_output = arguments['--json']
vcs, repository_root = get_vcs_root()
if vcs is None:
stderr.write('fatal: Not a git repository' + linesep)
return 128
commit = None
if arguments['--last-commit']:
commit = vcs.last_commit()
if arguments['FILENAME']:
invalid_filenames = find_invalid_filenames(arguments['FILENAME'],
repository_root)
if invalid_filenames:
invalid_filenames.append(('', ''))
stderr.write(
linesep.join(invalid[1] for invalid in invalid_filenames))
return 2
changed_files = vcs.modified_files(
repository_root,
tracked_only=arguments['--tracked'],
commit=commit)
modified_files = {}
for filename in arguments['FILENAME']:
normalized_filename = os.path.abspath(filename)
modified_files[normalized_filename] = changed_files.get(
normalized_filename)
else:
modified_files = vcs.modified_files(
repository_root,
tracked_only=arguments['--tracked'],
commit=commit)
linter_not_found = False
files_with_problems = 0
gitlint_config = get_config(repository_root)
json_result = {}
with futures.ThreadPoolExecutor(max_workers=multiprocessing.cpu_count())\
as executor:
processfile = functools.partial(process_file, vcs, commit,
arguments['--force'], gitlint_config)
for filename, result in executor.map(
processfile, [(filename, modified_files[filename])
for filename in sorted(modified_files.keys())]):
rel_filename = os.path.relpath(filename)
if not json_output:
stdout.write('Linting file: %s%s' % (termcolor.colored(
rel_filename, attrs=('bold', )), linesep))
output_lines = []
if result.get('error'):
output_lines.extend('%s: %s' % (ERROR, reason)
for reason in result.get('error'))
linter_not_found = True
if result.get('skipped'):
output_lines.extend('%s: %s' % (SKIPPED, reason)
for reason in result.get('skipped'))
if not result.get('comments', []):
if not output_lines:
output_lines.append(OK)
else:
files_with_problems += 1
for data in result['comments']:
formatted_message = format_comment(data)
output_lines.append(formatted_message)
data['formatted_message'] = formatted_message
if json_output:
json_result[filename] = result
else:
output = linesep.join(output_lines)
stdout.write(output)
stdout.write(linesep + linesep)
if json_output:
# Hack to convert to unicode, Python3 returns unicode, wheres Python2
# returns str.
stdout.write(
json.dumps(json_result,
ensure_ascii=False).encode('utf-8').decode('utf-8'))
if files_with_problems > 0:
return 1
if linter_not_found:
return 4
return 0
|
sk-/git-lint | gitlint/__init__.py | get_vcs_root | python | def get_vcs_root():
for vcs in (git, hg):
repo_root = vcs.repository_root()
if repo_root:
return vcs, repo_root
return (None, None) | Returns the vcs module and the root of the repo.
Returns:
A tuple containing the vcs module to use (git, hg) and the root of the
repository. If no repository exisits then (None, None) is returned. | train | https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/gitlint/__init__.py#L153-L165 | [
"def repository_root():\n \"\"\"Returns the root of the repository as an absolute path.\"\"\"\n try:\n root = subprocess.check_output(\n ['hg', 'root'], stderr=subprocess.STDOUT).strip()\n # Convert to unicode first\n return root.decode('utf-8')\n except subprocess.CalledPro... | # Copyright 2013-2014 Sebastian Kreft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
git-lint: improving source code one step at a time
Lints all the modified files in your git repository showing only the modified
lines.
It supports many filetypes, including:
PHP, Python, Javascript, Ruby, CSS, SCSS, PNG, JPEG, RST, YAML, INI, Java,
among others. See https://github.com/sk-/git-lint for the complete list.
Usage:
git-lint [-f | --force] [--json] [--last-commit] [FILENAME ...]
git-lint [-t | --tracked] [-f | --force] [--json] [--last-commit]
git-lint -h | --version
Options:
-h Show the usage patterns.
--version Prints the version number.
-f --force Shows all the lines with problems.
-t --tracked Lints only tracked files.
--json Prints the result as a json string. Useful to use it in
conjunction with other tools.
--last-commit Checks the last checked-out commit. This is mostly useful
when used as: git checkout <revid>; git lint --last-commit.
"""
from __future__ import unicode_literals
import codecs
import functools
import json
import multiprocessing
import os
import os.path
import sys
from concurrent import futures
import docopt
import termcolor
import yaml
import gitlint.git as git
import gitlint.hg as hg
import gitlint.linters as linters
from gitlint.version import __VERSION__
ERROR = termcolor.colored('ERROR', 'red', attrs=('bold', ))
SKIPPED = termcolor.colored('SKIPPED', 'yellow', attrs=('bold', ))
OK = termcolor.colored('OK', 'green', attrs=('bold', ))
def find_invalid_filenames(filenames, repository_root):
"""Find files that does not exist, are not in the repo or are directories.
Args:
filenames: list of filenames to check
repository_root: the absolute path of the repository's root.
Returns: A list of errors.
"""
errors = []
for filename in filenames:
if not os.path.abspath(filename).startswith(repository_root):
errors.append((filename, 'Error: File %s does not belong to '
'repository %s' % (filename, repository_root)))
if not os.path.exists(filename):
errors.append((filename,
'Error: File %s does not exist' % (filename, )))
if os.path.isdir(filename):
errors.append((filename,
'Error: %s is a directory. Directories are'
' not yet supported' % (filename, )))
return errors
def get_config(repo_root):
"""Gets the configuration file either from the repository or the default."""
config = os.path.join(os.path.dirname(__file__), 'configs', 'config.yaml')
if repo_root:
repo_config = os.path.join(repo_root, '.gitlint.yaml')
if os.path.exists(repo_config):
config = repo_config
with open(config) as f:
# We have to read the content first as yaml hangs up when reading from
# MockOpen
content = f.read()
# Yaml.load will return None when the input is empty.
if not content:
yaml_config = {}
else:
yaml_config = yaml.load(content)
return linters.parse_yaml_config(yaml_config, repo_root)
def format_comment(comment_data):
"""Formats the data returned by the linters.
Given a dictionary with the fields: line, column, severity, message_id,
message, will generate a message like:
'line {line}, col {column}: {severity}: [{message_id}]: {message}'
Any of the fields may nbe absent.
Args:
comment_data: dictionary with the linter data.
Returns:
a string with the formatted message.
"""
format_pieces = []
# Line and column information
if 'line' in comment_data:
format_pieces.append('line {line}')
if 'column' in comment_data:
if format_pieces:
format_pieces.append(', ')
format_pieces.append('col {column}')
if format_pieces:
format_pieces.append(': ')
# Severity and Id information
if 'severity' in comment_data:
format_pieces.append('{severity}: ')
if 'message_id' in comment_data:
format_pieces.append('[{message_id}]: ')
# The message
if 'message' in comment_data:
format_pieces.append('{message}')
return ''.join(format_pieces).format(**comment_data)
def process_file(vcs, commit, force, gitlint_config, file_data):
"""Lint the file
Returns:
The results from the linter.
"""
filename, extra_data = file_data
if force:
modified_lines = None
else:
modified_lines = vcs.modified_lines(
filename, extra_data, commit=commit)
result = linters.lint(filename, modified_lines, gitlint_config)
result = result[filename]
return filename, result
def main(argv, stdout=sys.stdout, stderr=sys.stderr):
"""Main gitlint routine. To be called from scripts."""
# Wrap sys stdout for python 2, so print can understand unicode.
linesep = os.linesep
if sys.version_info[0] < 3:
if stdout == sys.stdout:
stdout = codecs.getwriter("utf-8")(stdout)
if stderr == sys.stderr:
stderr = codecs.getwriter("utf-8")(stderr)
linesep = unicode(os.linesep) # pylint: disable=undefined-variable
arguments = docopt.docopt(
__doc__, argv=argv[1:], version='git-lint v%s' % __VERSION__)
json_output = arguments['--json']
vcs, repository_root = get_vcs_root()
if vcs is None:
stderr.write('fatal: Not a git repository' + linesep)
return 128
commit = None
if arguments['--last-commit']:
commit = vcs.last_commit()
if arguments['FILENAME']:
invalid_filenames = find_invalid_filenames(arguments['FILENAME'],
repository_root)
if invalid_filenames:
invalid_filenames.append(('', ''))
stderr.write(
linesep.join(invalid[1] for invalid in invalid_filenames))
return 2
changed_files = vcs.modified_files(
repository_root,
tracked_only=arguments['--tracked'],
commit=commit)
modified_files = {}
for filename in arguments['FILENAME']:
normalized_filename = os.path.abspath(filename)
modified_files[normalized_filename] = changed_files.get(
normalized_filename)
else:
modified_files = vcs.modified_files(
repository_root,
tracked_only=arguments['--tracked'],
commit=commit)
linter_not_found = False
files_with_problems = 0
gitlint_config = get_config(repository_root)
json_result = {}
with futures.ThreadPoolExecutor(max_workers=multiprocessing.cpu_count())\
as executor:
processfile = functools.partial(process_file, vcs, commit,
arguments['--force'], gitlint_config)
for filename, result in executor.map(
processfile, [(filename, modified_files[filename])
for filename in sorted(modified_files.keys())]):
rel_filename = os.path.relpath(filename)
if not json_output:
stdout.write('Linting file: %s%s' % (termcolor.colored(
rel_filename, attrs=('bold', )), linesep))
output_lines = []
if result.get('error'):
output_lines.extend('%s: %s' % (ERROR, reason)
for reason in result.get('error'))
linter_not_found = True
if result.get('skipped'):
output_lines.extend('%s: %s' % (SKIPPED, reason)
for reason in result.get('skipped'))
if not result.get('comments', []):
if not output_lines:
output_lines.append(OK)
else:
files_with_problems += 1
for data in result['comments']:
formatted_message = format_comment(data)
output_lines.append(formatted_message)
data['formatted_message'] = formatted_message
if json_output:
json_result[filename] = result
else:
output = linesep.join(output_lines)
stdout.write(output)
stdout.write(linesep + linesep)
if json_output:
# Hack to convert to unicode, Python3 returns unicode, wheres Python2
# returns str.
stdout.write(
json.dumps(json_result,
ensure_ascii=False).encode('utf-8').decode('utf-8'))
if files_with_problems > 0:
return 1
if linter_not_found:
return 4
return 0
|
sk-/git-lint | gitlint/__init__.py | process_file | python | def process_file(vcs, commit, force, gitlint_config, file_data):
filename, extra_data = file_data
if force:
modified_lines = None
else:
modified_lines = vcs.modified_lines(
filename, extra_data, commit=commit)
result = linters.lint(filename, modified_lines, gitlint_config)
result = result[filename]
return filename, result | Lint the file
Returns:
The results from the linter. | train | https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/gitlint/__init__.py#L168-L184 | [
"def lint(filename, lines, config):\n \"\"\"Lints a file.\n\n Args:\n filename: string: filename to lint.\n lines: list[int]|None: list of lines that we want to capture. If None,\n then all lines will be captured.\n config: dict[string: linter]: mapping from extension to a linter... | # Copyright 2013-2014 Sebastian Kreft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
git-lint: improving source code one step at a time
Lints all the modified files in your git repository showing only the modified
lines.
It supports many filetypes, including:
PHP, Python, Javascript, Ruby, CSS, SCSS, PNG, JPEG, RST, YAML, INI, Java,
among others. See https://github.com/sk-/git-lint for the complete list.
Usage:
git-lint [-f | --force] [--json] [--last-commit] [FILENAME ...]
git-lint [-t | --tracked] [-f | --force] [--json] [--last-commit]
git-lint -h | --version
Options:
-h Show the usage patterns.
--version Prints the version number.
-f --force Shows all the lines with problems.
-t --tracked Lints only tracked files.
--json Prints the result as a json string. Useful to use it in
conjunction with other tools.
--last-commit Checks the last checked-out commit. This is mostly useful
when used as: git checkout <revid>; git lint --last-commit.
"""
from __future__ import unicode_literals
import codecs
import functools
import json
import multiprocessing
import os
import os.path
import sys
from concurrent import futures
import docopt
import termcolor
import yaml
import gitlint.git as git
import gitlint.hg as hg
import gitlint.linters as linters
from gitlint.version import __VERSION__
ERROR = termcolor.colored('ERROR', 'red', attrs=('bold', ))
SKIPPED = termcolor.colored('SKIPPED', 'yellow', attrs=('bold', ))
OK = termcolor.colored('OK', 'green', attrs=('bold', ))
def find_invalid_filenames(filenames, repository_root):
"""Find files that does not exist, are not in the repo or are directories.
Args:
filenames: list of filenames to check
repository_root: the absolute path of the repository's root.
Returns: A list of errors.
"""
errors = []
for filename in filenames:
if not os.path.abspath(filename).startswith(repository_root):
errors.append((filename, 'Error: File %s does not belong to '
'repository %s' % (filename, repository_root)))
if not os.path.exists(filename):
errors.append((filename,
'Error: File %s does not exist' % (filename, )))
if os.path.isdir(filename):
errors.append((filename,
'Error: %s is a directory. Directories are'
' not yet supported' % (filename, )))
return errors
def get_config(repo_root):
"""Gets the configuration file either from the repository or the default."""
config = os.path.join(os.path.dirname(__file__), 'configs', 'config.yaml')
if repo_root:
repo_config = os.path.join(repo_root, '.gitlint.yaml')
if os.path.exists(repo_config):
config = repo_config
with open(config) as f:
# We have to read the content first as yaml hangs up when reading from
# MockOpen
content = f.read()
# Yaml.load will return None when the input is empty.
if not content:
yaml_config = {}
else:
yaml_config = yaml.load(content)
return linters.parse_yaml_config(yaml_config, repo_root)
def format_comment(comment_data):
"""Formats the data returned by the linters.
Given a dictionary with the fields: line, column, severity, message_id,
message, will generate a message like:
'line {line}, col {column}: {severity}: [{message_id}]: {message}'
Any of the fields may nbe absent.
Args:
comment_data: dictionary with the linter data.
Returns:
a string with the formatted message.
"""
format_pieces = []
# Line and column information
if 'line' in comment_data:
format_pieces.append('line {line}')
if 'column' in comment_data:
if format_pieces:
format_pieces.append(', ')
format_pieces.append('col {column}')
if format_pieces:
format_pieces.append(': ')
# Severity and Id information
if 'severity' in comment_data:
format_pieces.append('{severity}: ')
if 'message_id' in comment_data:
format_pieces.append('[{message_id}]: ')
# The message
if 'message' in comment_data:
format_pieces.append('{message}')
return ''.join(format_pieces).format(**comment_data)
def get_vcs_root():
"""Returns the vcs module and the root of the repo.
Returns:
A tuple containing the vcs module to use (git, hg) and the root of the
repository. If no repository exisits then (None, None) is returned.
"""
for vcs in (git, hg):
repo_root = vcs.repository_root()
if repo_root:
return vcs, repo_root
return (None, None)
def main(argv, stdout=sys.stdout, stderr=sys.stderr):
"""Main gitlint routine. To be called from scripts."""
# Wrap sys stdout for python 2, so print can understand unicode.
linesep = os.linesep
if sys.version_info[0] < 3:
if stdout == sys.stdout:
stdout = codecs.getwriter("utf-8")(stdout)
if stderr == sys.stderr:
stderr = codecs.getwriter("utf-8")(stderr)
linesep = unicode(os.linesep) # pylint: disable=undefined-variable
arguments = docopt.docopt(
__doc__, argv=argv[1:], version='git-lint v%s' % __VERSION__)
json_output = arguments['--json']
vcs, repository_root = get_vcs_root()
if vcs is None:
stderr.write('fatal: Not a git repository' + linesep)
return 128
commit = None
if arguments['--last-commit']:
commit = vcs.last_commit()
if arguments['FILENAME']:
invalid_filenames = find_invalid_filenames(arguments['FILENAME'],
repository_root)
if invalid_filenames:
invalid_filenames.append(('', ''))
stderr.write(
linesep.join(invalid[1] for invalid in invalid_filenames))
return 2
changed_files = vcs.modified_files(
repository_root,
tracked_only=arguments['--tracked'],
commit=commit)
modified_files = {}
for filename in arguments['FILENAME']:
normalized_filename = os.path.abspath(filename)
modified_files[normalized_filename] = changed_files.get(
normalized_filename)
else:
modified_files = vcs.modified_files(
repository_root,
tracked_only=arguments['--tracked'],
commit=commit)
linter_not_found = False
files_with_problems = 0
gitlint_config = get_config(repository_root)
json_result = {}
with futures.ThreadPoolExecutor(max_workers=multiprocessing.cpu_count())\
as executor:
processfile = functools.partial(process_file, vcs, commit,
arguments['--force'], gitlint_config)
for filename, result in executor.map(
processfile, [(filename, modified_files[filename])
for filename in sorted(modified_files.keys())]):
rel_filename = os.path.relpath(filename)
if not json_output:
stdout.write('Linting file: %s%s' % (termcolor.colored(
rel_filename, attrs=('bold', )), linesep))
output_lines = []
if result.get('error'):
output_lines.extend('%s: %s' % (ERROR, reason)
for reason in result.get('error'))
linter_not_found = True
if result.get('skipped'):
output_lines.extend('%s: %s' % (SKIPPED, reason)
for reason in result.get('skipped'))
if not result.get('comments', []):
if not output_lines:
output_lines.append(OK)
else:
files_with_problems += 1
for data in result['comments']:
formatted_message = format_comment(data)
output_lines.append(formatted_message)
data['formatted_message'] = formatted_message
if json_output:
json_result[filename] = result
else:
output = linesep.join(output_lines)
stdout.write(output)
stdout.write(linesep + linesep)
if json_output:
# Hack to convert to unicode, Python3 returns unicode, wheres Python2
# returns str.
stdout.write(
json.dumps(json_result,
ensure_ascii=False).encode('utf-8').decode('utf-8'))
if files_with_problems > 0:
return 1
if linter_not_found:
return 4
return 0
|
sk-/git-lint | gitlint/__init__.py | main | python | def main(argv, stdout=sys.stdout, stderr=sys.stderr):
# Wrap sys stdout for python 2, so print can understand unicode.
linesep = os.linesep
if sys.version_info[0] < 3:
if stdout == sys.stdout:
stdout = codecs.getwriter("utf-8")(stdout)
if stderr == sys.stderr:
stderr = codecs.getwriter("utf-8")(stderr)
linesep = unicode(os.linesep) # pylint: disable=undefined-variable
arguments = docopt.docopt(
__doc__, argv=argv[1:], version='git-lint v%s' % __VERSION__)
json_output = arguments['--json']
vcs, repository_root = get_vcs_root()
if vcs is None:
stderr.write('fatal: Not a git repository' + linesep)
return 128
commit = None
if arguments['--last-commit']:
commit = vcs.last_commit()
if arguments['FILENAME']:
invalid_filenames = find_invalid_filenames(arguments['FILENAME'],
repository_root)
if invalid_filenames:
invalid_filenames.append(('', ''))
stderr.write(
linesep.join(invalid[1] for invalid in invalid_filenames))
return 2
changed_files = vcs.modified_files(
repository_root,
tracked_only=arguments['--tracked'],
commit=commit)
modified_files = {}
for filename in arguments['FILENAME']:
normalized_filename = os.path.abspath(filename)
modified_files[normalized_filename] = changed_files.get(
normalized_filename)
else:
modified_files = vcs.modified_files(
repository_root,
tracked_only=arguments['--tracked'],
commit=commit)
linter_not_found = False
files_with_problems = 0
gitlint_config = get_config(repository_root)
json_result = {}
with futures.ThreadPoolExecutor(max_workers=multiprocessing.cpu_count())\
as executor:
processfile = functools.partial(process_file, vcs, commit,
arguments['--force'], gitlint_config)
for filename, result in executor.map(
processfile, [(filename, modified_files[filename])
for filename in sorted(modified_files.keys())]):
rel_filename = os.path.relpath(filename)
if not json_output:
stdout.write('Linting file: %s%s' % (termcolor.colored(
rel_filename, attrs=('bold', )), linesep))
output_lines = []
if result.get('error'):
output_lines.extend('%s: %s' % (ERROR, reason)
for reason in result.get('error'))
linter_not_found = True
if result.get('skipped'):
output_lines.extend('%s: %s' % (SKIPPED, reason)
for reason in result.get('skipped'))
if not result.get('comments', []):
if not output_lines:
output_lines.append(OK)
else:
files_with_problems += 1
for data in result['comments']:
formatted_message = format_comment(data)
output_lines.append(formatted_message)
data['formatted_message'] = formatted_message
if json_output:
json_result[filename] = result
else:
output = linesep.join(output_lines)
stdout.write(output)
stdout.write(linesep + linesep)
if json_output:
# Hack to convert to unicode, Python3 returns unicode, wheres Python2
# returns str.
stdout.write(
json.dumps(json_result,
ensure_ascii=False).encode('utf-8').decode('utf-8'))
if files_with_problems > 0:
return 1
if linter_not_found:
return 4
return 0 | Main gitlint routine. To be called from scripts. | train | https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/gitlint/__init__.py#L187-L292 | [
"def find_invalid_filenames(filenames, repository_root):\n \"\"\"Find files that does not exist, are not in the repo or are directories.\n\n Args:\n filenames: list of filenames to check\n repository_root: the absolute path of the repository's root.\n\n Returns: A list of errors.\n \"\"\"\n ... | # Copyright 2013-2014 Sebastian Kreft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
git-lint: improving source code one step at a time
Lints all the modified files in your git repository showing only the modified
lines.
It supports many filetypes, including:
PHP, Python, Javascript, Ruby, CSS, SCSS, PNG, JPEG, RST, YAML, INI, Java,
among others. See https://github.com/sk-/git-lint for the complete list.
Usage:
git-lint [-f | --force] [--json] [--last-commit] [FILENAME ...]
git-lint [-t | --tracked] [-f | --force] [--json] [--last-commit]
git-lint -h | --version
Options:
-h Show the usage patterns.
--version Prints the version number.
-f --force Shows all the lines with problems.
-t --tracked Lints only tracked files.
--json Prints the result as a json string. Useful to use it in
conjunction with other tools.
--last-commit Checks the last checked-out commit. This is mostly useful
when used as: git checkout <revid>; git lint --last-commit.
"""
from __future__ import unicode_literals
import codecs
import functools
import json
import multiprocessing
import os
import os.path
import sys
from concurrent import futures
import docopt
import termcolor
import yaml
import gitlint.git as git
import gitlint.hg as hg
import gitlint.linters as linters
from gitlint.version import __VERSION__
ERROR = termcolor.colored('ERROR', 'red', attrs=('bold', ))
SKIPPED = termcolor.colored('SKIPPED', 'yellow', attrs=('bold', ))
OK = termcolor.colored('OK', 'green', attrs=('bold', ))
def find_invalid_filenames(filenames, repository_root):
"""Find files that does not exist, are not in the repo or are directories.
Args:
filenames: list of filenames to check
repository_root: the absolute path of the repository's root.
Returns: A list of errors.
"""
errors = []
for filename in filenames:
if not os.path.abspath(filename).startswith(repository_root):
errors.append((filename, 'Error: File %s does not belong to '
'repository %s' % (filename, repository_root)))
if not os.path.exists(filename):
errors.append((filename,
'Error: File %s does not exist' % (filename, )))
if os.path.isdir(filename):
errors.append((filename,
'Error: %s is a directory. Directories are'
' not yet supported' % (filename, )))
return errors
def get_config(repo_root):
"""Gets the configuration file either from the repository or the default."""
config = os.path.join(os.path.dirname(__file__), 'configs', 'config.yaml')
if repo_root:
repo_config = os.path.join(repo_root, '.gitlint.yaml')
if os.path.exists(repo_config):
config = repo_config
with open(config) as f:
# We have to read the content first as yaml hangs up when reading from
# MockOpen
content = f.read()
# Yaml.load will return None when the input is empty.
if not content:
yaml_config = {}
else:
yaml_config = yaml.load(content)
return linters.parse_yaml_config(yaml_config, repo_root)
def format_comment(comment_data):
"""Formats the data returned by the linters.
Given a dictionary with the fields: line, column, severity, message_id,
message, will generate a message like:
'line {line}, col {column}: {severity}: [{message_id}]: {message}'
Any of the fields may nbe absent.
Args:
comment_data: dictionary with the linter data.
Returns:
a string with the formatted message.
"""
format_pieces = []
# Line and column information
if 'line' in comment_data:
format_pieces.append('line {line}')
if 'column' in comment_data:
if format_pieces:
format_pieces.append(', ')
format_pieces.append('col {column}')
if format_pieces:
format_pieces.append(': ')
# Severity and Id information
if 'severity' in comment_data:
format_pieces.append('{severity}: ')
if 'message_id' in comment_data:
format_pieces.append('[{message_id}]: ')
# The message
if 'message' in comment_data:
format_pieces.append('{message}')
return ''.join(format_pieces).format(**comment_data)
def get_vcs_root():
"""Returns the vcs module and the root of the repo.
Returns:
A tuple containing the vcs module to use (git, hg) and the root of the
repository. If no repository exisits then (None, None) is returned.
"""
for vcs in (git, hg):
repo_root = vcs.repository_root()
if repo_root:
return vcs, repo_root
return (None, None)
def process_file(vcs, commit, force, gitlint_config, file_data):
"""Lint the file
Returns:
The results from the linter.
"""
filename, extra_data = file_data
if force:
modified_lines = None
else:
modified_lines = vcs.modified_lines(
filename, extra_data, commit=commit)
result = linters.lint(filename, modified_lines, gitlint_config)
result = result[filename]
return filename, result
|
sk-/git-lint | gitlint/hg.py | last_commit | python | def last_commit():
try:
root = subprocess.check_output(
['hg', 'parent', '--template={node}'],
stderr=subprocess.STDOUT).strip()
# Convert to unicode first
return root.decode('utf-8')
except subprocess.CalledProcessError:
return None | Returns the SHA1 of the last commit. | train | https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/gitlint/hg.py#L33-L42 | null | # Copyright 2013-2014 Sebastian Kreft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to get information from mercurial."""
import os.path
import subprocess
import gitlint.utils as utils
def repository_root():
"""Returns the root of the repository as an absolute path."""
try:
root = subprocess.check_output(
['hg', 'root'], stderr=subprocess.STDOUT).strip()
# Convert to unicode first
return root.decode('utf-8')
except subprocess.CalledProcessError:
return None
def modified_files(root, tracked_only=False, commit=None):
"""Returns a list of files that has been modified since the last commit.
Args:
root: the root of the repository, it has to be an absolute path.
tracked_only: exclude untracked files when True.
commit: SHA1 of the commit. If None, it will get the modified files in the
working copy.
Returns: a dictionary with the modified files as keys, and additional
information as value. In this case it adds the status returned by
hg status.
"""
assert os.path.isabs(root), "Root has to be absolute, got: %s" % root
command = ['hg', 'status']
if commit:
command.append('--change=%s' % commit)
# Convert to unicode and split
status_lines = subprocess.check_output(command).decode('utf-8').split(
os.linesep)
modes = ['M', 'A']
if not tracked_only:
modes.append(r'\?')
modes_str = '|'.join(modes)
modified_file_status = utils.filter_lines(
status_lines,
r'(?P<mode>%s) (?P<filename>.+)' % modes_str,
groups=('filename', 'mode'))
return dict((os.path.join(root, filename), mode)
for filename, mode in modified_file_status)
def modified_lines(filename, extra_data, commit=None):
"""Returns the lines that have been modifed for this file.
Args:
filename: the file to check.
extra_data: is the extra_data returned by modified_files. Additionally, a
value of None means that the file was not modified.
commit: the complete sha1 (40 chars) of the commit. Note that specifying
this value will only work (100%) when commit == last_commit (with
respect to the currently checked out revision), otherwise, we could miss
some lines.
Returns: a list of lines that were modified, or None in case all lines are
new.
"""
if extra_data is None:
return []
if extra_data != 'M':
return None
command = ['hg', 'diff', '-U', '0']
if commit:
command.append('--change=%s' % commit)
command.append(filename)
# Split as bytes, as the output may have some non unicode characters.
diff_lines = subprocess.check_output(command).split(
os.linesep.encode('utf-8'))
diff_line_numbers = utils.filter_lines(
diff_lines,
br'@@ -\d+,\d+ \+(?P<start_line>\d+),(?P<lines>\d+) @@',
groups=('start_line', 'lines'))
modified_line_numbers = []
for start_line, lines in diff_line_numbers:
start_line = int(start_line)
lines = int(lines)
modified_line_numbers.extend(range(start_line, start_line + lines))
return modified_line_numbers
|
sk-/git-lint | gitlint/hg.py | modified_files | python | def modified_files(root, tracked_only=False, commit=None):
assert os.path.isabs(root), "Root has to be absolute, got: %s" % root
command = ['hg', 'status']
if commit:
command.append('--change=%s' % commit)
# Convert to unicode and split
status_lines = subprocess.check_output(command).decode('utf-8').split(
os.linesep)
modes = ['M', 'A']
if not tracked_only:
modes.append(r'\?')
modes_str = '|'.join(modes)
modified_file_status = utils.filter_lines(
status_lines,
r'(?P<mode>%s) (?P<filename>.+)' % modes_str,
groups=('filename', 'mode'))
return dict((os.path.join(root, filename), mode)
for filename, mode in modified_file_status) | Returns a list of files that has been modified since the last commit.
Args:
root: the root of the repository, it has to be an absolute path.
tracked_only: exclude untracked files when True.
commit: SHA1 of the commit. If None, it will get the modified files in the
working copy.
Returns: a dictionary with the modified files as keys, and additional
information as value. In this case it adds the status returned by
hg status. | train | https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/gitlint/hg.py#L45-L79 | [
"def filter_lines(lines, filter_regex, groups=None):\n \"\"\"Filters out the lines not matching the pattern.\n\n Args:\n lines: list[string]: lines to filter.\n pattern: string: regular expression to filter out lines.\n\n Returns: list[string]: the list of filtered lines.\n \"\"\"\n pattern... | # Copyright 2013-2014 Sebastian Kreft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to get information from mercurial."""
import os.path
import subprocess
import gitlint.utils as utils
def repository_root():
"""Returns the root of the repository as an absolute path."""
try:
root = subprocess.check_output(
['hg', 'root'], stderr=subprocess.STDOUT).strip()
# Convert to unicode first
return root.decode('utf-8')
except subprocess.CalledProcessError:
return None
def last_commit():
"""Returns the SHA1 of the last commit."""
try:
root = subprocess.check_output(
['hg', 'parent', '--template={node}'],
stderr=subprocess.STDOUT).strip()
# Convert to unicode first
return root.decode('utf-8')
except subprocess.CalledProcessError:
return None
def modified_lines(filename, extra_data, commit=None):
"""Returns the lines that have been modifed for this file.
Args:
filename: the file to check.
extra_data: is the extra_data returned by modified_files. Additionally, a
value of None means that the file was not modified.
commit: the complete sha1 (40 chars) of the commit. Note that specifying
this value will only work (100%) when commit == last_commit (with
respect to the currently checked out revision), otherwise, we could miss
some lines.
Returns: a list of lines that were modified, or None in case all lines are
new.
"""
if extra_data is None:
return []
if extra_data != 'M':
return None
command = ['hg', 'diff', '-U', '0']
if commit:
command.append('--change=%s' % commit)
command.append(filename)
# Split as bytes, as the output may have some non unicode characters.
diff_lines = subprocess.check_output(command).split(
os.linesep.encode('utf-8'))
diff_line_numbers = utils.filter_lines(
diff_lines,
br'@@ -\d+,\d+ \+(?P<start_line>\d+),(?P<lines>\d+) @@',
groups=('start_line', 'lines'))
modified_line_numbers = []
for start_line, lines in diff_line_numbers:
start_line = int(start_line)
lines = int(lines)
modified_line_numbers.extend(range(start_line, start_line + lines))
return modified_line_numbers
|
sk-/git-lint | gitlint/hg.py | modified_lines | python | def modified_lines(filename, extra_data, commit=None):
if extra_data is None:
return []
if extra_data != 'M':
return None
command = ['hg', 'diff', '-U', '0']
if commit:
command.append('--change=%s' % commit)
command.append(filename)
# Split as bytes, as the output may have some non unicode characters.
diff_lines = subprocess.check_output(command).split(
os.linesep.encode('utf-8'))
diff_line_numbers = utils.filter_lines(
diff_lines,
br'@@ -\d+,\d+ \+(?P<start_line>\d+),(?P<lines>\d+) @@',
groups=('start_line', 'lines'))
modified_line_numbers = []
for start_line, lines in diff_line_numbers:
start_line = int(start_line)
lines = int(lines)
modified_line_numbers.extend(range(start_line, start_line + lines))
return modified_line_numbers | Returns the lines that have been modifed for this file.
Args:
filename: the file to check.
extra_data: is the extra_data returned by modified_files. Additionally, a
value of None means that the file was not modified.
commit: the complete sha1 (40 chars) of the commit. Note that specifying
this value will only work (100%) when commit == last_commit (with
respect to the currently checked out revision), otherwise, we could miss
some lines.
Returns: a list of lines that were modified, or None in case all lines are
new. | train | https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/gitlint/hg.py#L82-L120 | [
"def filter_lines(lines, filter_regex, groups=None):\n \"\"\"Filters out the lines not matching the pattern.\n\n Args:\n lines: list[string]: lines to filter.\n pattern: string: regular expression to filter out lines.\n\n Returns: list[string]: the list of filtered lines.\n \"\"\"\n pattern... | # Copyright 2013-2014 Sebastian Kreft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to get information from mercurial."""
import os.path
import subprocess
import gitlint.utils as utils
def repository_root():
"""Returns the root of the repository as an absolute path."""
try:
root = subprocess.check_output(
['hg', 'root'], stderr=subprocess.STDOUT).strip()
# Convert to unicode first
return root.decode('utf-8')
except subprocess.CalledProcessError:
return None
def last_commit():
"""Returns the SHA1 of the last commit."""
try:
root = subprocess.check_output(
['hg', 'parent', '--template={node}'],
stderr=subprocess.STDOUT).strip()
# Convert to unicode first
return root.decode('utf-8')
except subprocess.CalledProcessError:
return None
def modified_files(root, tracked_only=False, commit=None):
"""Returns a list of files that has been modified since the last commit.
Args:
root: the root of the repository, it has to be an absolute path.
tracked_only: exclude untracked files when True.
commit: SHA1 of the commit. If None, it will get the modified files in the
working copy.
Returns: a dictionary with the modified files as keys, and additional
information as value. In this case it adds the status returned by
hg status.
"""
assert os.path.isabs(root), "Root has to be absolute, got: %s" % root
command = ['hg', 'status']
if commit:
command.append('--change=%s' % commit)
# Convert to unicode and split
status_lines = subprocess.check_output(command).decode('utf-8').split(
os.linesep)
modes = ['M', 'A']
if not tracked_only:
modes.append(r'\?')
modes_str = '|'.join(modes)
modified_file_status = utils.filter_lines(
status_lines,
r'(?P<mode>%s) (?P<filename>.+)' % modes_str,
groups=('filename', 'mode'))
return dict((os.path.join(root, filename), mode)
for filename, mode in modified_file_status)
|
sk-/git-lint | scripts/custom_linters/ini_linter.py | lint | python | def lint(filename):
config = ConfigParser.ConfigParser()
try:
config.read(filename)
return 0
except ConfigParser.Error as error:
print('Error: %s' % error)
return 1
except:
print('Unexpected Error')
return 2 | Lints an INI file, returning 0 in case of success. | train | https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/scripts/custom_linters/ini_linter.py#L23-L34 | null | #!/bin/python
# Copyright 2013-2014 Sebastian Kreft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple INI linter."""
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
import sys
if __name__ == '__main__':
sys.exit(lint(sys.argv[1]))
|
sk-/git-lint | gitlint/linters.py | missing_requirements_command | python | def missing_requirements_command(missing_programs, installation_string,
filename, unused_lines):
verb = 'is'
if len(missing_programs) > 1:
verb = 'are'
return {
filename: {
'skipped': [
'%s %s not installed. %s' % (', '.join(missing_programs), verb,
installation_string)
]
}
} | Pseudo-command to be used when requirements are missing. | train | https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/gitlint/linters.py#L41-L54 | null | # Copyright 2013-2014 Sebastian Kreft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for invoking a lint command."""
import collections
import functools
import os
import os.path
import re
import string
import subprocess
import gitlint.utils as utils
class Partial(functools.partial):
"""Wrapper around functools partial to support equality comparisons."""
def __eq__(self, other):
return (isinstance(other, self.__class__) and self.args == other.args
and self.keywords == other.keywords)
def __repr__(self):
# This method should never be executed, only in failing tests.
return (
'Partial: func: %s, args: %s, kwargs: %s' %
(self.func.__name__, self.args, self.keywords)) # pragma: no cover
# TODO(skreft): add test case for result already in cache.
def lint_command(name, program, arguments, filter_regex, filename, lines):
"""Executes a lint program and filter the output.
Executes the lint tool 'program' with arguments 'arguments' over the file
'filename' returning only those lines matching the regular expression
'filter_regex'.
Args:
name: string: the name of the linter.
program: string: lint program.
arguments: list[string]: extra arguments for the program.
filter_regex: string: regular expression to filter lines.
filename: string: filename to lint.
lines: list[int]|None: list of lines that we want to capture. If None,
then all lines will be captured.
Returns: dict: a dict with the extracted info from the message.
"""
output = utils.get_output_from_cache(name, filename)
if output is None:
call_arguments = [program] + arguments + [filename]
try:
output = subprocess.check_output(
call_arguments, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as error:
output = error.output
except OSError:
return {
filename: {
'error': [('Could not execute "%s".%sMake sure all ' +
'required programs are installed') %
(' '.join(call_arguments), os.linesep)]
}
}
output = output.decode('utf-8')
utils.save_output_in_cache(name, filename, output)
output_lines = output.split(os.linesep)
if lines is None:
lines_regex = r'\d+'
else:
lines_regex = '|'.join(map(str, lines))
lines_regex = '(%s)' % lines_regex
groups = ('line', 'column', 'message', 'severity', 'message_id')
filtered_lines = utils.filter_lines(
output_lines,
filter_regex.format(lines=lines_regex, filename=re.escape(filename)),
groups=groups)
result = []
for data in filtered_lines:
comment = dict(p for p in zip(groups, data) if p[1] is not None)
if 'line' in comment:
comment['line'] = int(comment['line'])
if 'column' in comment:
comment['column'] = int(comment['column'])
if 'severity' in comment:
comment['severity'] = comment['severity'].title()
result.append(comment)
return {filename: {'comments': result}}
def _replace_variables(data, variables):
"""Replace the format variables in all items of data."""
formatter = string.Formatter()
return [formatter.vformat(item, [], variables) for item in data]
# TODO(skreft): validate data['filter'], ie check that only has valid fields.
def parse_yaml_config(yaml_config, repo_home):
"""Converts a dictionary (parsed Yaml) to the internal representation."""
config = collections.defaultdict(list)
variables = {
'DEFAULT_CONFIGS': os.path.join(os.path.dirname(__file__), 'configs'),
'REPO_HOME': repo_home,
}
for name, data in yaml_config.items():
command = _replace_variables([data['command']], variables)[0]
requirements = _replace_variables(
data.get('requirements', []), variables)
arguments = _replace_variables(data.get('arguments', []), variables)
not_found_programs = utils.programs_not_in_path([command] +
requirements)
if not_found_programs:
linter_command = Partial(missing_requirements_command,
not_found_programs, data['installation'])
else:
linter_command = Partial(lint_command, name, command, arguments,
data['filter'])
for extension in data['extensions']:
config[extension].append(linter_command)
return config
def lint(filename, lines, config):
"""Lints a file.
Args:
filename: string: filename to lint.
lines: list[int]|None: list of lines that we want to capture. If None,
then all lines will be captured.
config: dict[string: linter]: mapping from extension to a linter
function.
Returns: dict: if there were errors running the command then the field
'error' will have the reasons in a list. if the lint process was skipped,
then a field 'skipped' will be set with the reasons. Otherwise, the field
'comments' will have the messages.
"""
_, ext = os.path.splitext(filename)
if ext in config:
output = collections.defaultdict(list)
for linter in config[ext]:
linter_output = linter(filename, lines)
for category, values in linter_output[filename].items():
output[category].extend(values)
if 'comments' in output:
output['comments'] = sorted(
output['comments'],
key=lambda x: (x.get('line', -1), x.get('column', -1)))
return {filename: dict(output)}
else:
return {
filename: {
'skipped': [
'no linter is defined or enabled for files'
' with extension "%s"' % ext
]
}
}
|
sk-/git-lint | gitlint/linters.py | lint_command | python | def lint_command(name, program, arguments, filter_regex, filename, lines):
output = utils.get_output_from_cache(name, filename)
if output is None:
call_arguments = [program] + arguments + [filename]
try:
output = subprocess.check_output(
call_arguments, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as error:
output = error.output
except OSError:
return {
filename: {
'error': [('Could not execute "%s".%sMake sure all ' +
'required programs are installed') %
(' '.join(call_arguments), os.linesep)]
}
}
output = output.decode('utf-8')
utils.save_output_in_cache(name, filename, output)
output_lines = output.split(os.linesep)
if lines is None:
lines_regex = r'\d+'
else:
lines_regex = '|'.join(map(str, lines))
lines_regex = '(%s)' % lines_regex
groups = ('line', 'column', 'message', 'severity', 'message_id')
filtered_lines = utils.filter_lines(
output_lines,
filter_regex.format(lines=lines_regex, filename=re.escape(filename)),
groups=groups)
result = []
for data in filtered_lines:
comment = dict(p for p in zip(groups, data) if p[1] is not None)
if 'line' in comment:
comment['line'] = int(comment['line'])
if 'column' in comment:
comment['column'] = int(comment['column'])
if 'severity' in comment:
comment['severity'] = comment['severity'].title()
result.append(comment)
return {filename: {'comments': result}} | Executes a lint program and filter the output.
Executes the lint tool 'program' with arguments 'arguments' over the file
'filename' returning only those lines matching the regular expression
'filter_regex'.
Args:
name: string: the name of the linter.
program: string: lint program.
arguments: list[string]: extra arguments for the program.
filter_regex: string: regular expression to filter lines.
filename: string: filename to lint.
lines: list[int]|None: list of lines that we want to capture. If None,
then all lines will be captured.
Returns: dict: a dict with the extracted info from the message. | train | https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/gitlint/linters.py#L58-L121 | [
"def filter_lines(lines, filter_regex, groups=None):\n \"\"\"Filters out the lines not matching the pattern.\n\n Args:\n lines: list[string]: lines to filter.\n pattern: string: regular expression to filter out lines.\n\n Returns: list[string]: the list of filtered lines.\n \"\"\"\n pattern... | # Copyright 2013-2014 Sebastian Kreft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for invoking a lint command."""
import collections
import functools
import os
import os.path
import re
import string
import subprocess
import gitlint.utils as utils
class Partial(functools.partial):
"""Wrapper around functools partial to support equality comparisons."""
def __eq__(self, other):
return (isinstance(other, self.__class__) and self.args == other.args
and self.keywords == other.keywords)
def __repr__(self):
# This method should never be executed, only in failing tests.
return (
'Partial: func: %s, args: %s, kwargs: %s' %
(self.func.__name__, self.args, self.keywords)) # pragma: no cover
def missing_requirements_command(missing_programs, installation_string,
filename, unused_lines):
"""Pseudo-command to be used when requirements are missing."""
verb = 'is'
if len(missing_programs) > 1:
verb = 'are'
return {
filename: {
'skipped': [
'%s %s not installed. %s' % (', '.join(missing_programs), verb,
installation_string)
]
}
}
# TODO(skreft): add test case for result already in cache.
def _replace_variables(data, variables):
"""Replace the format variables in all items of data."""
formatter = string.Formatter()
return [formatter.vformat(item, [], variables) for item in data]
# TODO(skreft): validate data['filter'], ie check that only has valid fields.
def parse_yaml_config(yaml_config, repo_home):
"""Converts a dictionary (parsed Yaml) to the internal representation."""
config = collections.defaultdict(list)
variables = {
'DEFAULT_CONFIGS': os.path.join(os.path.dirname(__file__), 'configs'),
'REPO_HOME': repo_home,
}
for name, data in yaml_config.items():
command = _replace_variables([data['command']], variables)[0]
requirements = _replace_variables(
data.get('requirements', []), variables)
arguments = _replace_variables(data.get('arguments', []), variables)
not_found_programs = utils.programs_not_in_path([command] +
requirements)
if not_found_programs:
linter_command = Partial(missing_requirements_command,
not_found_programs, data['installation'])
else:
linter_command = Partial(lint_command, name, command, arguments,
data['filter'])
for extension in data['extensions']:
config[extension].append(linter_command)
return config
def lint(filename, lines, config):
"""Lints a file.
Args:
filename: string: filename to lint.
lines: list[int]|None: list of lines that we want to capture. If None,
then all lines will be captured.
config: dict[string: linter]: mapping from extension to a linter
function.
Returns: dict: if there were errors running the command then the field
'error' will have the reasons in a list. if the lint process was skipped,
then a field 'skipped' will be set with the reasons. Otherwise, the field
'comments' will have the messages.
"""
_, ext = os.path.splitext(filename)
if ext in config:
output = collections.defaultdict(list)
for linter in config[ext]:
linter_output = linter(filename, lines)
for category, values in linter_output[filename].items():
output[category].extend(values)
if 'comments' in output:
output['comments'] = sorted(
output['comments'],
key=lambda x: (x.get('line', -1), x.get('column', -1)))
return {filename: dict(output)}
else:
return {
filename: {
'skipped': [
'no linter is defined or enabled for files'
' with extension "%s"' % ext
]
}
}
|
sk-/git-lint | gitlint/linters.py | _replace_variables | python | def _replace_variables(data, variables):
formatter = string.Formatter()
return [formatter.vformat(item, [], variables) for item in data] | Replace the format variables in all items of data. | train | https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/gitlint/linters.py#L124-L127 | null | # Copyright 2013-2014 Sebastian Kreft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for invoking a lint command."""
import collections
import functools
import os
import os.path
import re
import string
import subprocess
import gitlint.utils as utils
class Partial(functools.partial):
"""Wrapper around functools partial to support equality comparisons."""
def __eq__(self, other):
return (isinstance(other, self.__class__) and self.args == other.args
and self.keywords == other.keywords)
def __repr__(self):
# This method should never be executed, only in failing tests.
return (
'Partial: func: %s, args: %s, kwargs: %s' %
(self.func.__name__, self.args, self.keywords)) # pragma: no cover
def missing_requirements_command(missing_programs, installation_string,
filename, unused_lines):
"""Pseudo-command to be used when requirements are missing."""
verb = 'is'
if len(missing_programs) > 1:
verb = 'are'
return {
filename: {
'skipped': [
'%s %s not installed. %s' % (', '.join(missing_programs), verb,
installation_string)
]
}
}
# TODO(skreft): add test case for result already in cache.
def lint_command(name, program, arguments, filter_regex, filename, lines):
"""Executes a lint program and filter the output.
Executes the lint tool 'program' with arguments 'arguments' over the file
'filename' returning only those lines matching the regular expression
'filter_regex'.
Args:
name: string: the name of the linter.
program: string: lint program.
arguments: list[string]: extra arguments for the program.
filter_regex: string: regular expression to filter lines.
filename: string: filename to lint.
lines: list[int]|None: list of lines that we want to capture. If None,
then all lines will be captured.
Returns: dict: a dict with the extracted info from the message.
"""
output = utils.get_output_from_cache(name, filename)
if output is None:
call_arguments = [program] + arguments + [filename]
try:
output = subprocess.check_output(
call_arguments, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as error:
output = error.output
except OSError:
return {
filename: {
'error': [('Could not execute "%s".%sMake sure all ' +
'required programs are installed') %
(' '.join(call_arguments), os.linesep)]
}
}
output = output.decode('utf-8')
utils.save_output_in_cache(name, filename, output)
output_lines = output.split(os.linesep)
if lines is None:
lines_regex = r'\d+'
else:
lines_regex = '|'.join(map(str, lines))
lines_regex = '(%s)' % lines_regex
groups = ('line', 'column', 'message', 'severity', 'message_id')
filtered_lines = utils.filter_lines(
output_lines,
filter_regex.format(lines=lines_regex, filename=re.escape(filename)),
groups=groups)
result = []
for data in filtered_lines:
comment = dict(p for p in zip(groups, data) if p[1] is not None)
if 'line' in comment:
comment['line'] = int(comment['line'])
if 'column' in comment:
comment['column'] = int(comment['column'])
if 'severity' in comment:
comment['severity'] = comment['severity'].title()
result.append(comment)
return {filename: {'comments': result}}
# TODO(skreft): validate data['filter'], ie check that only has valid fields.
def parse_yaml_config(yaml_config, repo_home):
"""Converts a dictionary (parsed Yaml) to the internal representation."""
config = collections.defaultdict(list)
variables = {
'DEFAULT_CONFIGS': os.path.join(os.path.dirname(__file__), 'configs'),
'REPO_HOME': repo_home,
}
for name, data in yaml_config.items():
command = _replace_variables([data['command']], variables)[0]
requirements = _replace_variables(
data.get('requirements', []), variables)
arguments = _replace_variables(data.get('arguments', []), variables)
not_found_programs = utils.programs_not_in_path([command] +
requirements)
if not_found_programs:
linter_command = Partial(missing_requirements_command,
not_found_programs, data['installation'])
else:
linter_command = Partial(lint_command, name, command, arguments,
data['filter'])
for extension in data['extensions']:
config[extension].append(linter_command)
return config
def lint(filename, lines, config):
"""Lints a file.
Args:
filename: string: filename to lint.
lines: list[int]|None: list of lines that we want to capture. If None,
then all lines will be captured.
config: dict[string: linter]: mapping from extension to a linter
function.
Returns: dict: if there were errors running the command then the field
'error' will have the reasons in a list. if the lint process was skipped,
then a field 'skipped' will be set with the reasons. Otherwise, the field
'comments' will have the messages.
"""
_, ext = os.path.splitext(filename)
if ext in config:
output = collections.defaultdict(list)
for linter in config[ext]:
linter_output = linter(filename, lines)
for category, values in linter_output[filename].items():
output[category].extend(values)
if 'comments' in output:
output['comments'] = sorted(
output['comments'],
key=lambda x: (x.get('line', -1), x.get('column', -1)))
return {filename: dict(output)}
else:
return {
filename: {
'skipped': [
'no linter is defined or enabled for files'
' with extension "%s"' % ext
]
}
}
|
sk-/git-lint | gitlint/linters.py | parse_yaml_config | python | def parse_yaml_config(yaml_config, repo_home):
config = collections.defaultdict(list)
variables = {
'DEFAULT_CONFIGS': os.path.join(os.path.dirname(__file__), 'configs'),
'REPO_HOME': repo_home,
}
for name, data in yaml_config.items():
command = _replace_variables([data['command']], variables)[0]
requirements = _replace_variables(
data.get('requirements', []), variables)
arguments = _replace_variables(data.get('arguments', []), variables)
not_found_programs = utils.programs_not_in_path([command] +
requirements)
if not_found_programs:
linter_command = Partial(missing_requirements_command,
not_found_programs, data['installation'])
else:
linter_command = Partial(lint_command, name, command, arguments,
data['filter'])
for extension in data['extensions']:
config[extension].append(linter_command)
return config | Converts a dictionary (parsed Yaml) to the internal representation. | train | https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/gitlint/linters.py#L131-L157 | [
"def _replace_variables(data, variables):\n \"\"\"Replace the format variables in all items of data.\"\"\"\n formatter = string.Formatter()\n return [formatter.vformat(item, [], variables) for item in data]\n",
"def programs_not_in_path(programs):\n \"\"\"Returns all the programs that are not found in... | # Copyright 2013-2014 Sebastian Kreft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for invoking a lint command."""
import collections
import functools
import os
import os.path
import re
import string
import subprocess
import gitlint.utils as utils
class Partial(functools.partial):
"""Wrapper around functools partial to support equality comparisons."""
def __eq__(self, other):
return (isinstance(other, self.__class__) and self.args == other.args
and self.keywords == other.keywords)
def __repr__(self):
# This method should never be executed, only in failing tests.
return (
'Partial: func: %s, args: %s, kwargs: %s' %
(self.func.__name__, self.args, self.keywords)) # pragma: no cover
def missing_requirements_command(missing_programs, installation_string,
filename, unused_lines):
"""Pseudo-command to be used when requirements are missing."""
verb = 'is'
if len(missing_programs) > 1:
verb = 'are'
return {
filename: {
'skipped': [
'%s %s not installed. %s' % (', '.join(missing_programs), verb,
installation_string)
]
}
}
# TODO(skreft): add test case for result already in cache.
def lint_command(name, program, arguments, filter_regex, filename, lines):
"""Executes a lint program and filter the output.
Executes the lint tool 'program' with arguments 'arguments' over the file
'filename' returning only those lines matching the regular expression
'filter_regex'.
Args:
name: string: the name of the linter.
program: string: lint program.
arguments: list[string]: extra arguments for the program.
filter_regex: string: regular expression to filter lines.
filename: string: filename to lint.
lines: list[int]|None: list of lines that we want to capture. If None,
then all lines will be captured.
Returns: dict: a dict with the extracted info from the message.
"""
output = utils.get_output_from_cache(name, filename)
if output is None:
call_arguments = [program] + arguments + [filename]
try:
output = subprocess.check_output(
call_arguments, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as error:
output = error.output
except OSError:
return {
filename: {
'error': [('Could not execute "%s".%sMake sure all ' +
'required programs are installed') %
(' '.join(call_arguments), os.linesep)]
}
}
output = output.decode('utf-8')
utils.save_output_in_cache(name, filename, output)
output_lines = output.split(os.linesep)
if lines is None:
lines_regex = r'\d+'
else:
lines_regex = '|'.join(map(str, lines))
lines_regex = '(%s)' % lines_regex
groups = ('line', 'column', 'message', 'severity', 'message_id')
filtered_lines = utils.filter_lines(
output_lines,
filter_regex.format(lines=lines_regex, filename=re.escape(filename)),
groups=groups)
result = []
for data in filtered_lines:
comment = dict(p for p in zip(groups, data) if p[1] is not None)
if 'line' in comment:
comment['line'] = int(comment['line'])
if 'column' in comment:
comment['column'] = int(comment['column'])
if 'severity' in comment:
comment['severity'] = comment['severity'].title()
result.append(comment)
return {filename: {'comments': result}}
def _replace_variables(data, variables):
"""Replace the format variables in all items of data."""
formatter = string.Formatter()
return [formatter.vformat(item, [], variables) for item in data]
# TODO(skreft): validate data['filter'], ie check that only has valid fields.
def lint(filename, lines, config):
    """Lints a file.
    Args:
      filename: string: filename to lint.
      lines: list[int]|None: list of lines that we want to capture. If None,
        then all lines will be captured.
      config: dict[string: linter]: mapping from extension to a linter
        function.
    Returns: dict: if there were errors running the command then the field
      'error' will have the reasons in a list. if the lint process was skipped,
      then a field 'skipped' will be set with the reasons. Otherwise, the field
      'comments' will have the messages.
    """
    ext = os.path.splitext(filename)[1]
    # Guard clause: nothing to do for extensions with no configured linter.
    if ext not in config:
        reason = ('no linter is defined or enabled for files'
                  ' with extension "%s"' % ext)
        return {filename: {'skipped': [reason]}}
    # Merge the per-category messages produced by every linter registered
    # for this extension.
    merged = collections.defaultdict(list)
    for linter in config[ext]:
        for category, messages in linter(filename, lines)[filename].items():
            merged[category].extend(messages)
    comments = merged.get('comments')
    if comments is not None:
        # Present issues in file order; missing line/column sort first.
        comments.sort(key=lambda c: (c.get('line', -1), c.get('column', -1)))
    return {filename: dict(merged)}
|
sk-/git-lint | gitlint/linters.py | lint | python | def lint(filename, lines, config):
_, ext = os.path.splitext(filename)
if ext in config:
output = collections.defaultdict(list)
for linter in config[ext]:
linter_output = linter(filename, lines)
for category, values in linter_output[filename].items():
output[category].extend(values)
if 'comments' in output:
output['comments'] = sorted(
output['comments'],
key=lambda x: (x.get('line', -1), x.get('column', -1)))
return {filename: dict(output)}
else:
return {
filename: {
'skipped': [
'no linter is defined or enabled for files'
' with extension "%s"' % ext
]
}
} | Lints a file.
Args:
filename: string: filename to lint.
lines: list[int]|None: list of lines that we want to capture. If None,
then all lines will be captured.
config: dict[string: linter]: mapping from extension to a linter
function.
Returns: dict: if there were errors running the command then the field
'error' will have the reasons in a list. if the lint process was skipped,
then a field 'skipped' will be set with the reasons. Otherwise, the field
'comments' will have the messages. | train | https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/gitlint/linters.py#L160-L197 | null | # Copyright 2013-2014 Sebastian Kreft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for invoking a lint command."""
import collections
import functools
import os
import os.path
import re
import string
import subprocess
import gitlint.utils as utils
class Partial(functools.partial):
    """functools.partial subclass that supports equality comparisons.
    Two Partial objects are equal when they wrap the same callable with the
    same positional and keyword arguments.
    """
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        # Bug fix: the original comparison ignored self.func, so partials of
        # *different* functions with identical arguments compared equal.
        return (self.func == other.func and self.args == other.args
                and self.keywords == other.keywords)
    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
    def __hash__(self):
        # Defining __eq__ alone makes the class unhashable on Python 3;
        # hash on the same fields equality compares (keyword names are
        # strings, so sorting the items is well defined).
        return hash((self.func, self.args,
                     tuple(sorted(self.keywords.items()))))
    def __repr__(self):
        # This method should never be executed, only in failing tests.
        return (
            'Partial: func: %s, args: %s, kwargs: %s' %
            (self.func.__name__, self.args, self.keywords))  # pragma: no cover
def missing_requirements_command(missing_programs, installation_string,
                                 filename, unused_lines):
    """Pseudo-command used when the linter's required programs are missing.
    Produces a 'skipped' entry naming the missing programs and how to
    install them, instead of running the real linter.
    """
    verb = 'are' if len(missing_programs) > 1 else 'is'
    message = '%s %s not installed. %s' % (', '.join(missing_programs), verb,
                                           installation_string)
    return {filename: {'skipped': [message]}}
# TODO(skreft): add test case for result already in cache.
def lint_command(name, program, arguments, filter_regex, filename, lines):
    """Executes a lint program and filter the output.
    Executes the lint tool 'program' with arguments 'arguments' over the file
    'filename' returning only those lines matching the regular expression
    'filter_regex'.
    Args:
      name: string: the name of the linter (used as the cache key).
      program: string: lint program.
      arguments: list[string]: extra arguments for the program.
      filter_regex: string: regular expression to filter lines; may use the
        '{lines}' and '{filename}' format placeholders.
      filename: string: filename to lint.
      lines: list[int]|None: list of lines that we want to capture. If None,
        then all lines will be captured.
    Returns: dict: a dict with the extracted info from the message.
    """
    # Reuse the cached tool output when it is newer than the linted file,
    # avoiding a re-run of the (slow) external program.
    output = utils.get_output_from_cache(name, filename)
    if output is None:
        call_arguments = [program] + arguments + [filename]
        try:
            output = subprocess.check_output(
                call_arguments, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as error:
            # Lint tools exit non-zero when they report issues; their
            # captured output is still the report we want to parse.
            output = error.output
        except OSError:
            # The program could not be started at all (e.g. not installed).
            return {
                filename: {
                    'error': [('Could not execute "%s".%sMake sure all ' +
                               'required programs are installed') %
                              (' '.join(call_arguments), os.linesep)]
                }
            }
        output = output.decode('utf-8')
        utils.save_output_in_cache(name, filename, output)
    output_lines = output.split(os.linesep)
    # Build the alternation that restricts matches to the requested line
    # numbers; r'\d+' accepts any line when no specific lines were given.
    if lines is None:
        lines_regex = r'\d+'
    else:
        lines_regex = '|'.join(map(str, lines))
    lines_regex = '(%s)' % lines_regex
    # Named groups the filter regex may populate for each reported issue.
    groups = ('line', 'column', 'message', 'severity', 'message_id')
    filtered_lines = utils.filter_lines(
        output_lines,
        filter_regex.format(lines=lines_regex, filename=re.escape(filename)),
        groups=groups)
    result = []
    for data in filtered_lines:
        # Keep only the groups that actually matched for this output line.
        comment = dict(p for p in zip(groups, data) if p[1] is not None)
        if 'line' in comment:
            comment['line'] = int(comment['line'])
        if 'column' in comment:
            comment['column'] = int(comment['column'])
        if 'severity' in comment:
            # Normalize capitalization, e.g. 'warning' -> 'Warning'.
            comment['severity'] = comment['severity'].title()
        result.append(comment)
    return {filename: {'comments': result}}
def _replace_variables(data, variables):
    """Replace the format variables in all items of data.
    Args:
      data: list[string]: strings possibly containing '{NAME}' placeholders.
      variables: dict: mapping from placeholder name to replacement value.
    Returns: list[string]: the strings with every placeholder expanded.
    """
    # vformat (unlike str.format) takes the mapping directly, so no
    # positional format arguments are needed.
    formatter = string.Formatter()
    return [formatter.vformat(item, [], variables) for item in data]
# TODO(skreft): validate data['filter'], ie check that only has valid fields.
def parse_yaml_config(yaml_config, repo_home):
    """Converts a dictionary (parsed Yaml) to the internal representation.
    For every linter entry a callable taking (filename, lines) is built and
    registered under each file extension that linter handles.
    """
    variables = {
        'DEFAULT_CONFIGS': os.path.join(os.path.dirname(__file__), 'configs'),
        'REPO_HOME': repo_home,
    }
    config = collections.defaultdict(list)
    for linter_name, linter_data in yaml_config.items():
        command = _replace_variables([linter_data['command']], variables)[0]
        requirements = _replace_variables(
            linter_data.get('requirements', []), variables)
        arguments = _replace_variables(
            linter_data.get('arguments', []), variables)
        missing = utils.programs_not_in_path([command] + requirements)
        if missing:
            # The tool (or one of its requirements) is not installed:
            # substitute a command that reports how to install it.
            runner = Partial(missing_requirements_command, missing,
                             linter_data['installation'])
        else:
            runner = Partial(lint_command, linter_name, command, arguments,
                             linter_data['filter'])
        for extension in linter_data['extensions']:
            config[extension].append(runner)
    return config
|
sk-/git-lint | gitlint/git.py | modified_lines | python | def modified_lines(filename, extra_data, commit=None):
if extra_data is None:
return []
if extra_data not in ('M ', ' M', 'MM'):
return None
if commit is None:
commit = '0' * 40
commit = commit.encode('utf-8')
# Split as bytes, as the output may have some non unicode characters.
blame_lines = subprocess.check_output(
['git', 'blame', '--porcelain', filename]).split(
os.linesep.encode('utf-8'))
modified_line_numbers = utils.filter_lines(
blame_lines, commit + br' (?P<line>\d+) (\d+)', groups=('line', ))
return list(map(int, modified_line_numbers)) | Returns the lines that have been modifed for this file.
Args:
filename: the file to check.
extra_data: is the extra_data returned by modified_files. Additionally, a
value of None means that the file was not modified.
commit: the complete sha1 (40 chars) of the commit. Note that specifying
this value will only work (100%) when commit == last_commit (with
respect to the currently checked out revision), otherwise, we could miss
some lines.
Returns: a list of lines that were modified, or None in case all lines are
new. | train | https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/gitlint/git.py#L109-L140 | [
"def filter_lines(lines, filter_regex, groups=None):\n \"\"\"Filters out the lines not matching the pattern.\n\n Args:\n lines: list[string]: lines to filter.\n pattern: string: regular expression to filter out lines.\n\n Returns: list[string]: the list of filtered lines.\n \"\"\"\n pattern... | # Copyright 2013-2014 Sebastian Kreft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to get information from git."""
import os.path
import subprocess
import gitlint.utils as utils
def repository_root():
    """Returns the root of the repository as an absolute path."""
    try:
        output = subprocess.check_output(
            ['git', 'rev-parse', '--show-toplevel'],
            stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError:
        # Not inside a git repository.
        return None
    # git prints raw bytes with a trailing newline; strip and decode.
    return output.strip().decode('utf-8')
def last_commit():
    """Returns the SHA1 of the last commit."""
    try:
        output = subprocess.check_output(
            ['git', 'rev-parse', 'HEAD'], stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError:
        # No repository, or a repository with no commits yet.
        return None
    # git prints raw bytes with a trailing newline; strip and decode.
    return output.strip().decode('utf-8')
def _remove_filename_quotes(filename):
"""Removes the quotes from a filename returned by git status."""
if filename.startswith('"') and filename.endswith('"'):
return filename[1:-1]
return filename
def modified_files(root, tracked_only=False, commit=None):
    """Returns a list of files that has been modified since the last commit.
    Args:
      root: the root of the repository, it has to be an absolute path.
      tracked_only: exclude untracked files when True.
      commit: SHA1 of the commit. If None, it will get the modified files in the
        working copy.
    Returns: a dictionary with the modified files as keys, and additional
      information as value. In this case it adds the status returned by
      git status.
    """
    assert os.path.isabs(root), "Root has to be absolute, got: %s" % root
    if commit:
        # A specific commit was requested: inspect it instead of the
        # working copy.
        return _modified_files_with_commit(root, commit)
    # Convert to unicode and split
    status_lines = subprocess.check_output([
        'git', 'status', '--porcelain', '--untracked-files=all',
        '--ignore-submodules=all'
    ]).decode('utf-8').split(os.linesep)
    # Two-letter porcelain status codes for modified/added files; '??'
    # (untracked) is only accepted when tracked_only is False.
    modes = ['M ', ' M', 'A ', 'AM', 'MM']
    if not tracked_only:
        modes.append(r'\?\?')
    modes_str = '|'.join(modes)
    modified_file_status = utils.filter_lines(
        status_lines,
        r'(?P<mode>%s) (?P<filename>.+)' % modes_str,
        groups=('filename', 'mode'))
    # git quotes filenames containing special characters; strip the quotes
    # and anchor every path at the repository root.
    return dict((os.path.join(root, _remove_filename_quotes(filename)), mode)
                for filename, mode in modified_file_status)
def _modified_files_with_commit(root, commit):
    """Returns the files added or modified by the given commit.
    Args:
      root: absolute path to the repository root.
      commit: SHA1 of the commit to inspect.
    Returns: dict mapping absolute filenames to their status ('A ' or 'M ').
    """
    # Convert to unicode and split
    status_lines = subprocess.check_output([
        'git', 'diff-tree', '-r', '--root', '--no-commit-id', '--name-status',
        commit
    ]).decode('utf-8').split(os.linesep)
    modified_file_status = utils.filter_lines(
        status_lines,
        r'(?P<mode>A|M)\s(?P<filename>.+)',
        groups=('filename', 'mode'))
    # We need to add a space to the mode, so to be compatible with the output
    # generated by modified files.
    return dict((os.path.join(root, _remove_filename_quotes(filename)),
                 mode + ' ') for filename, mode in modified_file_status)
|
sk-/git-lint | gitlint/utils.py | filter_lines | python | def filter_lines(lines, filter_regex, groups=None):
pattern = re.compile(filter_regex)
for line in lines:
match = pattern.search(line)
if match:
if groups is None:
yield line
elif len(groups) == 1:
yield match.group(groups[0])
else:
matched_groups = match.groupdict()
yield tuple(matched_groups.get(group) for group in groups) | Filters out the lines not matching the pattern.
Args:
lines: list[string]: lines to filter.
pattern: string: regular expression to filter out lines.
Returns: list[string]: the list of filtered lines. | train | https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/gitlint/utils.py#L24-L43 | null | # Copyright 2013-2014 Sebastian Kreft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common function used across modules."""
import io
import os
import re
# This can be just pathlib when 2.7 and 3.4 support is dropped.
import pathlib2 as pathlib
# TODO(skreft): add test
def which(program):
    """Returns a list of paths where the program is found.
    Args:
      program: string: either an absolute path to an executable, or a bare
        program name to be looked up in every directory of $PATH.
    Returns: list[string]: candidate paths; empty when the program is not
      found.
    """
    # An absolute path is accepted as-is when it points to an executable
    # regular file; no PATH search is performed in that case.
    if (os.path.isabs(program) and os.path.isfile(program)
            and os.access(program, os.X_OK)):
        return [program]
    candidates = []
    locations = os.environ.get("PATH").split(os.pathsep)
    for location in locations:
        candidate = os.path.join(location, program)
        # Only executable regular files count as matches.
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            candidates.append(candidate)
    return candidates
def programs_not_in_path(programs):
"""Returns all the programs that are not found in the PATH."""
return [program for program in programs if not which(program)]
def _open_for_write(filename):
"""Opens filename for writing, creating the directories if needed."""
dirname = os.path.dirname(filename)
pathlib.Path(dirname).mkdir(parents=True, exist_ok=True)
return io.open(filename, 'w')
def _get_cache_filename(name, filename):
"""Returns the cache location for filename and linter name."""
filename = os.path.abspath(filename)[1:]
home_folder = os.path.expanduser('~')
base_cache_dir = os.path.join(home_folder, '.git-lint', 'cache')
return os.path.join(base_cache_dir, name, filename)
def get_output_from_cache(name, filename):
"""Returns the output from the cache if still valid.
It checks that the cache file is defined and that its modification time is
after the modification time of the original file.
Args:
name: string: name of the linter.
filename: string: path of the filename for which we are retrieving the
output.
Returns: a string with the output, if it is still valid, or None otherwise.
"""
cache_filename = _get_cache_filename(name, filename)
if (os.path.exists(cache_filename)
and os.path.getmtime(filename) < os.path.getmtime(cache_filename)):
with io.open(cache_filename) as f:
return f.read()
return None
def save_output_in_cache(name, filename, output):
"""Saves output in the cache location.
Args:
name: string: name of the linter.
filename: string: path of the filename for which we are saving the output.
output: string: full output (not yet filetered) of the lint command.
"""
cache_filename = _get_cache_filename(name, filename)
with _open_for_write(cache_filename) as f:
f.write(output)
|
sk-/git-lint | gitlint/utils.py | which | python | def which(program):
if (os.path.isabs(program) and os.path.isfile(program)
and os.access(program, os.X_OK)):
return [program]
candidates = []
locations = os.environ.get("PATH").split(os.pathsep)
for location in locations:
candidate = os.path.join(location, program)
if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
candidates.append(candidate)
return candidates | Returns a list of paths where the program is found. | train | https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/gitlint/utils.py#L47-L59 | null | # Copyright 2013-2014 Sebastian Kreft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common function used across modules."""
import io
import os
import re
# This can be just pathlib when 2.7 and 3.4 support is dropped.
import pathlib2 as pathlib
def filter_lines(lines, filter_regex, groups=None):
    """Yields the lines (or captured groups) matching the pattern.
    Args:
      lines: list[string]: lines to filter.
      filter_regex: string: regular expression used to select lines.
      groups: tuple[string]|None: names of regex groups to extract. When
        None the whole matching line is yielded; one name yields that
        group's value; several names yield a tuple of values.
    Returns: generator of the selected lines / group values.
    """
    matcher = re.compile(filter_regex)
    for line in lines:
        found = matcher.search(line)
        if not found:
            continue
        if groups is None:
            yield line
        elif len(groups) == 1:
            yield found.group(groups[0])
        else:
            by_name = found.groupdict()
            yield tuple(by_name.get(name) for name in groups)
# TODO(skreft): add test
def programs_not_in_path(programs):
"""Returns all the programs that are not found in the PATH."""
return [program for program in programs if not which(program)]
def _open_for_write(filename):
"""Opens filename for writing, creating the directories if needed."""
dirname = os.path.dirname(filename)
pathlib.Path(dirname).mkdir(parents=True, exist_ok=True)
return io.open(filename, 'w')
def _get_cache_filename(name, filename):
"""Returns the cache location for filename and linter name."""
filename = os.path.abspath(filename)[1:]
home_folder = os.path.expanduser('~')
base_cache_dir = os.path.join(home_folder, '.git-lint', 'cache')
return os.path.join(base_cache_dir, name, filename)
def get_output_from_cache(name, filename):
"""Returns the output from the cache if still valid.
It checks that the cache file is defined and that its modification time is
after the modification time of the original file.
Args:
name: string: name of the linter.
filename: string: path of the filename for which we are retrieving the
output.
Returns: a string with the output, if it is still valid, or None otherwise.
"""
cache_filename = _get_cache_filename(name, filename)
if (os.path.exists(cache_filename)
and os.path.getmtime(filename) < os.path.getmtime(cache_filename)):
with io.open(cache_filename) as f:
return f.read()
return None
def save_output_in_cache(name, filename, output):
"""Saves output in the cache location.
Args:
name: string: name of the linter.
filename: string: path of the filename for which we are saving the output.
output: string: full output (not yet filetered) of the lint command.
"""
cache_filename = _get_cache_filename(name, filename)
with _open_for_write(cache_filename) as f:
f.write(output)
|
sk-/git-lint | gitlint/utils.py | _open_for_write | python | def _open_for_write(filename):
dirname = os.path.dirname(filename)
pathlib.Path(dirname).mkdir(parents=True, exist_ok=True)
return io.open(filename, 'w') | Opens filename for writing, creating the directories if needed. | train | https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/gitlint/utils.py#L67-L72 | null | # Copyright 2013-2014 Sebastian Kreft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common function used across modules."""
import io
import os
import re
# This can be just pathlib when 2.7 and 3.4 support is dropped.
import pathlib2 as pathlib
def filter_lines(lines, filter_regex, groups=None):
"""Filters out the lines not matching the pattern.
Args:
lines: list[string]: lines to filter.
pattern: string: regular expression to filter out lines.
Returns: list[string]: the list of filtered lines.
"""
pattern = re.compile(filter_regex)
for line in lines:
match = pattern.search(line)
if match:
if groups is None:
yield line
elif len(groups) == 1:
yield match.group(groups[0])
else:
matched_groups = match.groupdict()
yield tuple(matched_groups.get(group) for group in groups)
# TODO(skreft): add test
def which(program):
    """Returns a list of paths where the program is found."""
    def _is_executable_file(path):
        # Only executable regular files count as matches.
        return os.path.isfile(path) and os.access(path, os.X_OK)

    # An executable absolute path is accepted directly, without a PATH scan.
    if os.path.isabs(program) and _is_executable_file(program):
        return [program]
    search_dirs = os.environ.get("PATH").split(os.pathsep)
    return [os.path.join(directory, program)
            for directory in search_dirs
            if _is_executable_file(os.path.join(directory, program))]
def programs_not_in_path(programs):
    """Returns all the programs that are not found in the PATH."""
    missing = []
    for program in programs:
        if not which(program):
            missing.append(program)
    return missing
def _get_cache_filename(name, filename):
"""Returns the cache location for filename and linter name."""
filename = os.path.abspath(filename)[1:]
home_folder = os.path.expanduser('~')
base_cache_dir = os.path.join(home_folder, '.git-lint', 'cache')
return os.path.join(base_cache_dir, name, filename)
def get_output_from_cache(name, filename):
"""Returns the output from the cache if still valid.
It checks that the cache file is defined and that its modification time is
after the modification time of the original file.
Args:
name: string: name of the linter.
filename: string: path of the filename for which we are retrieving the
output.
Returns: a string with the output, if it is still valid, or None otherwise.
"""
cache_filename = _get_cache_filename(name, filename)
if (os.path.exists(cache_filename)
and os.path.getmtime(filename) < os.path.getmtime(cache_filename)):
with io.open(cache_filename) as f:
return f.read()
return None
def save_output_in_cache(name, filename, output):
"""Saves output in the cache location.
Args:
name: string: name of the linter.
filename: string: path of the filename for which we are saving the output.
output: string: full output (not yet filetered) of the lint command.
"""
cache_filename = _get_cache_filename(name, filename)
with _open_for_write(cache_filename) as f:
f.write(output)
|
sk-/git-lint | gitlint/utils.py | _get_cache_filename | python | def _get_cache_filename(name, filename):
filename = os.path.abspath(filename)[1:]
home_folder = os.path.expanduser('~')
base_cache_dir = os.path.join(home_folder, '.git-lint', 'cache')
return os.path.join(base_cache_dir, name, filename) | Returns the cache location for filename and linter name. | train | https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/gitlint/utils.py#L75-L81 | null | # Copyright 2013-2014 Sebastian Kreft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common function used across modules."""
import io
import os
import re
# This can be just pathlib when 2.7 and 3.4 support is dropped.
import pathlib2 as pathlib
def filter_lines(lines, filter_regex, groups=None):
"""Filters out the lines not matching the pattern.
Args:
lines: list[string]: lines to filter.
pattern: string: regular expression to filter out lines.
Returns: list[string]: the list of filtered lines.
"""
pattern = re.compile(filter_regex)
for line in lines:
match = pattern.search(line)
if match:
if groups is None:
yield line
elif len(groups) == 1:
yield match.group(groups[0])
else:
matched_groups = match.groupdict()
yield tuple(matched_groups.get(group) for group in groups)
# TODO(skreft): add test
def which(program):
"""Returns a list of paths where the program is found."""
if (os.path.isabs(program) and os.path.isfile(program)
and os.access(program, os.X_OK)):
return [program]
candidates = []
locations = os.environ.get("PATH").split(os.pathsep)
for location in locations:
candidate = os.path.join(location, program)
if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
candidates.append(candidate)
return candidates
def programs_not_in_path(programs):
"""Returns all the programs that are not found in the PATH."""
return [program for program in programs if not which(program)]
def _open_for_write(filename):
    """Opens filename for writing, creating the directories if needed.
    Returns: a writable text-mode file object.
    """
    dirname = os.path.dirname(filename)
    # mkdir(parents=True, exist_ok=True) is a race-free "mkdir -p";
    # pathlib2 provides it on Python 2 as well.
    pathlib.Path(dirname).mkdir(parents=True, exist_ok=True)
    # NOTE(review): io.open uses the locale-preferred encoding here, while
    # the cached linter output is utf-8 decoded text -- a non-utf-8 locale
    # may fail to round-trip; confirm whether an explicit encoding is wanted.
    return io.open(filename, 'w')
def get_output_from_cache(name, filename):
    """Returns the output from the cache if still valid.
    It checks that the cache file is defined and that its modification time is
    after the modification time of the original file.
    Args:
      name: string: name of the linter.
      filename: string: path of the filename for which we are retrieving the
        output.
    Returns: a string with the output, if it is still valid, or None otherwise.
    """
    cache_filename = _get_cache_filename(name, filename)
    # The entry is valid only when it is strictly newer than the linted
    # file; an older entry means the file changed after it was cached.
    if (os.path.exists(cache_filename)
            and os.path.getmtime(filename) < os.path.getmtime(cache_filename)):
        with io.open(cache_filename) as f:
            return f.read()
    return None
def save_output_in_cache(name, filename, output):
"""Saves output in the cache location.
Args:
name: string: name of the linter.
filename: string: path of the filename for which we are saving the output.
output: string: full output (not yet filetered) of the lint command.
"""
cache_filename = _get_cache_filename(name, filename)
with _open_for_write(cache_filename) as f:
f.write(output)
|
sk-/git-lint | gitlint/utils.py | get_output_from_cache | python | def get_output_from_cache(name, filename):
cache_filename = _get_cache_filename(name, filename)
if (os.path.exists(cache_filename)
and os.path.getmtime(filename) < os.path.getmtime(cache_filename)):
with io.open(cache_filename) as f:
return f.read()
return None | Returns the output from the cache if still valid.
It checks that the cache file is defined and that its modification time is
after the modification time of the original file.
Args:
name: string: name of the linter.
filename: string: path of the filename for which we are retrieving the
output.
Returns: a string with the output, if it is still valid, or None otherwise. | train | https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/gitlint/utils.py#L84-L103 | [
"def _get_cache_filename(name, filename):\n \"\"\"Returns the cache location for filename and linter name.\"\"\"\n filename = os.path.abspath(filename)[1:]\n home_folder = os.path.expanduser('~')\n base_cache_dir = os.path.join(home_folder, '.git-lint', 'cache')\n\n return os.path.join(base_cache_dir... | # Copyright 2013-2014 Sebastian Kreft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common function used across modules."""
import io
import os
import re
# This can be just pathlib when 2.7 and 3.4 support is dropped.
import pathlib2 as pathlib
def filter_lines(lines, filter_regex, groups=None):
"""Filters out the lines not matching the pattern.
Args:
lines: list[string]: lines to filter.
pattern: string: regular expression to filter out lines.
Returns: list[string]: the list of filtered lines.
"""
pattern = re.compile(filter_regex)
for line in lines:
match = pattern.search(line)
if match:
if groups is None:
yield line
elif len(groups) == 1:
yield match.group(groups[0])
else:
matched_groups = match.groupdict()
yield tuple(matched_groups.get(group) for group in groups)
# TODO(skreft): add test
def which(program):
"""Returns a list of paths where the program is found."""
if (os.path.isabs(program) and os.path.isfile(program)
and os.access(program, os.X_OK)):
return [program]
candidates = []
locations = os.environ.get("PATH").split(os.pathsep)
for location in locations:
candidate = os.path.join(location, program)
if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
candidates.append(candidate)
return candidates
def programs_not_in_path(programs):
    """Returns every program from programs that cannot be located via PATH."""
    missing = []
    for candidate in programs:
        # which() yields an empty (falsy) list for unknown programs.
        if not which(candidate):
            missing.append(candidate)
    return missing
def _open_for_write(filename):
"""Opens filename for writing, creating the directories if needed."""
dirname = os.path.dirname(filename)
pathlib.Path(dirname).mkdir(parents=True, exist_ok=True)
return io.open(filename, 'w')
def _get_cache_filename(name, filename):
"""Returns the cache location for filename and linter name."""
filename = os.path.abspath(filename)[1:]
home_folder = os.path.expanduser('~')
base_cache_dir = os.path.join(home_folder, '.git-lint', 'cache')
return os.path.join(base_cache_dir, name, filename)
def save_output_in_cache(name, filename, output):
    """Saves output in the cache location.
    Args:
      name: string: name of the linter.
      filename: string: path of the filename for which we are saving the output.
      output: string: full output (not yet filtered) of the lint command.
    """
    cache_filename = _get_cache_filename(name, filename)
    # _open_for_write creates any missing parent directories.
    with _open_for_write(cache_filename) as f:
        f.write(output)
|
sk-/git-lint | gitlint/utils.py | save_output_in_cache | python | def save_output_in_cache(name, filename, output):
cache_filename = _get_cache_filename(name, filename)
with _open_for_write(cache_filename) as f:
f.write(output) | Saves output in the cache location.
Args:
name: string: name of the linter.
filename: string: path of the filename for which we are saving the output.
  output: string: full output (not yet filtered) of the lint command.
"def _open_for_write(filename):\n \"\"\"Opens filename for writing, creating the directories if needed.\"\"\"\n dirname = os.path.dirname(filename)\n pathlib.Path(dirname).mkdir(parents=True, exist_ok=True)\n\n return io.open(filename, 'w')\n",
"def _get_cache_filename(name, filename):\n \"\"\"Retu... | # Copyright 2013-2014 Sebastian Kreft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common function used across modules."""
import io
import os
import re
# This can be just pathlib when 2.7 and 3.4 support is dropped.
import pathlib2 as pathlib
def filter_lines(lines, filter_regex, groups=None):
    """Yields the lines matching filter_regex, optionally extracting groups.

    With groups=None the whole matching line is produced; with one group
    name that group's value is produced; with several names a tuple of
    the named groups is produced.
    """
    regex = re.compile(filter_regex)
    for current_line in lines:
        found = regex.search(current_line)
        if not found:
            continue
        if groups is None:
            yield current_line
        elif len(groups) == 1:
            yield found.group(groups[0])
        else:
            named = found.groupdict()
            yield tuple(named.get(group_name) for group_name in groups)
# TODO(skreft): add test
def which(program):
    """Returns a list of paths where the program is found.

    An absolute path to an executable regular file is returned as-is in a
    single-element list; otherwise every directory listed in the PATH
    environment variable is probed.  Returns an empty list when the
    program cannot be found.
    """
    if (os.path.isabs(program) and os.path.isfile(program)
            and os.access(program, os.X_OK)):
        return [program]

    candidates = []
    # Defaulting to '' keeps this from raising AttributeError
    # (None.split) when the PATH variable is unset.
    locations = os.environ.get("PATH", "").split(os.pathsep)
    for location in locations:
        candidate = os.path.join(location, program)
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            candidates.append(candidate)
    return candidates
def programs_not_in_path(programs):
    """Returns all the programs that are not found in the PATH."""
    # which() returns an empty (falsy) list for programs it cannot find.
    return [program for program in programs if not which(program)]
def _open_for_write(filename):
    """Opens filename for writing, creating the directories if needed."""
    dirname = os.path.dirname(filename)
    # exist_ok makes this a no-op when the directory tree already exists.
    pathlib.Path(dirname).mkdir(parents=True, exist_ok=True)
    return io.open(filename, 'w')
def _get_cache_filename(name, filename):
    """Returns the cache location for filename and linter name."""
    # Drop the leading separator so the absolute path can be nested
    # below the per-linter cache directory.
    filename = os.path.abspath(filename)[1:]
    home_folder = os.path.expanduser('~')
    base_cache_dir = os.path.join(home_folder, '.git-lint', 'cache')
    return os.path.join(base_cache_dir, name, filename)
def get_output_from_cache(name, filename):
    """Returns the output from the cache if still valid.
    It checks that the cache file is defined and that its modification time is
    after the modification time of the original file.
    Args:
      name: string: name of the linter.
      filename: string: path of the filename for which we are retrieving the
        output.
    Returns: a string with the output, if it is still valid, or None otherwise.
    """
    cache_filename = _get_cache_filename(name, filename)
    # A cache entry is stale once the source file is at least as new as it.
    if (os.path.exists(cache_filename)
            and os.path.getmtime(filename) < os.path.getmtime(cache_filename)):
        with io.open(cache_filename) as f:
            return f.read()
    return None
|
opencivicdata/pupa | pupa/importers/base.py | omnihash | python | def omnihash(obj):
if isinstance(obj, set):
return hash(frozenset(omnihash(e) for e in obj))
elif isinstance(obj, (tuple, list)):
return hash(tuple(omnihash(e) for e in obj))
elif isinstance(obj, dict):
return hash(frozenset((k, omnihash(v)) for k, v in obj.items()))
else:
return hash(obj) | recursively hash unhashable objects | train | https://github.com/opencivicdata/pupa/blob/18e0ddc4344804987ee0f2227bf600375538dbd5/pupa/importers/base.py#L18-L27 | null | import os
import copy
import glob
import json
import logging
from django.db.models import Q
from django.contrib.contenttypes.models import ContentType
from opencivicdata.legislative.models import LegislativeSession
from pupa import settings
from pupa.exceptions import DuplicateItemError
from pupa.utils import get_pseudo_id, utcnow
from pupa.exceptions import UnresolvedIdError, DataImportError
from pupa.models import Identifier
def items_differ(jsonitems, dbitems, subfield_dict):
    """ check whether or not jsonitems and dbitems differ
    jsonitems: list of dicts from scraped JSON
    dbitems: list of DB model instances to compare against
    subfield_dict: mapping of related-field name -> (Subtype,
        reverse_id_field, nested subfield_dict); keys listed here are
        compared recursively instead of by plain attribute equality
    Returns True when the two collections are not equivalent.
    """
    # short circuit common cases
    if len(jsonitems) == len(dbitems) == 0:
        # both are empty
        return False
    elif len(jsonitems) != len(dbitems):
        # if lengths differ, they're definitely different
        return True
    original_jsonitems = jsonitems
    jsonitems = copy.deepcopy(jsonitems)
    # assumes all json items share the first item's keys - TODO confirm
    keys = jsonitems[0].keys()
    # go over dbitems looking for matches
    for dbitem in dbitems:
        order = getattr(dbitem, 'order', None)
        match = None
        for i, jsonitem in enumerate(jsonitems):
            # check if all keys (excluding subfields) match
            for k in keys:
                if k not in subfield_dict and getattr(dbitem, k) != jsonitem.get(k, None):
                    break
            else:
                # all fields match so far, possibly equal, just check subfields now
                for k in subfield_dict:
                    jsonsubitems = jsonitem[k]
                    dbsubitems = list(getattr(dbitem, k).all())
                    if items_differ(jsonsubitems, dbsubitems, subfield_dict[k][2]):
                        break
                else:
                    # if the dbitem sets 'order', then the order matters
                    if order is not None and int(order) != original_jsonitems.index(jsonitem):
                        break
                    # these items are equal, so let's mark it for removal
                    match = i
                    break
        if match is not None:
            # item exists in both, remove from jsonitems
            jsonitems.pop(match)
        else:
            # exists in db but not json
            return True
    # if we get here, jsonitems has to be empty because we asserted that the length was
    # the same and we found a match for each thing in dbitems, here's a safety check just in case
    if jsonitems: # pragma: no cover
        return True
    return False
class BaseImporter(object):
    """ BaseImporter
    Imports a stream of scraped JSON dicts of a single type into the DB,
    deduplicating identical records and resolving scraper-assigned ids.
    Override:
        get_object(data)
        limit_spec(spec) [optional, required if pseudo_ids are used]
        prepare_for_db(data) [optional]
        postimport() [optional]
    """
    _type = None                # string name of the imported type (set by subclass)
    model_class = None          # Django model written by this importer
    related_models = {}         # field name -> (Subtype, reverse_id_field, nested dict)
    preserve_order = set()      # related fields whose item order is significant
    merge_related = {}          # field name -> key fields used to merge instead of wipe
    cached_transformers = {}    # per-type transformers loaded from settings
    def __init__(self, jurisdiction_id):
        """Sets up per-run caches and logging helpers for one jurisdiction."""
        self.jurisdiction_id = jurisdiction_id
        self.json_to_db_id = {}
        self.duplicates = {}
        self.pseudo_id_cache = {}
        self.session_cache = {}
        self.logger = logging.getLogger("pupa")
        self.info = self.logger.info
        self.debug = self.logger.debug
        self.warning = self.logger.warning
        self.error = self.logger.error
        self.critical = self.logger.critical
        # load transformers from appropriate setting
        if settings.IMPORT_TRANSFORMERS.get(self._type):
            self.cached_transformers = settings.IMPORT_TRANSFORMERS[self._type]
    def get_session_id(self, identifier):
        """Returns the DB id of the named legislative session, caching lookups."""
        if identifier not in self.session_cache:
            self.session_cache[identifier] = LegislativeSession.objects.get(
                identifier=identifier, jurisdiction_id=self.jurisdiction_id).id
        return self.session_cache[identifier]
    # no-op hooks meant to be overridden by subclasses
    def prepare_for_db(self, data):
        """Hook: adjust data just before it is written to the DB (default: no-op)."""
        return data
    def postimport(self):
        """Hook: runs once after every item has been imported (default: no-op)."""
        pass
    def resolve_json_id(self, json_id, allow_no_match=False):
        """
        Given an id found in scraped JSON, return a DB id for the object.
        params:
            json_id: id from json
            allow_no_match: just return None if id can't be resolved
        returns:
            database id
        raises:
            UnresolvedIdError if id couldn't be resolved
        """
        if not json_id:
            return None
        if json_id.startswith('~'):
            # keep caches of all the pseudo-ids to avoid doing 1000s of lookups during import
            if json_id not in self.pseudo_id_cache:
                spec = get_pseudo_id(json_id)
                spec = self.limit_spec(spec)
                if isinstance(spec, Q):
                    objects = self.model_class.objects.filter(spec)
                else:
                    objects = self.model_class.objects.filter(**spec)
                ids = {each.id for each in objects}
                if len(ids) == 1:
                    self.pseudo_id_cache[json_id] = ids.pop()
                    errmsg = None
                elif not ids:
                    errmsg = 'cannot resolve pseudo id to {}: {}'.format(
                        self.model_class.__name__, json_id)
                else:
                    errmsg = 'multiple objects returned for {} pseudo id {}: {}'.format(
                        self.model_class.__name__, json_id, ids)
                # either raise or log error
                if errmsg:
                    if not allow_no_match:
                        raise UnresolvedIdError(errmsg)
                    else:
                        self.error(errmsg)
                        self.pseudo_id_cache[json_id] = None
            # return the cached object
            return self.pseudo_id_cache[json_id]
        # get the id that the duplicate points to, or use self
        json_id = self.duplicates.get(json_id, json_id)
        try:
            return self.json_to_db_id[json_id]
        except KeyError:
            raise UnresolvedIdError('cannot resolve id: {}'.format(json_id))
    def import_directory(self, datadir):
        """ import a JSON directory into the database """
        def json_stream():
            # load all json, mapped by json_id
            for fname in glob.glob(os.path.join(datadir, self._type + '_*.json')):
                with open(fname) as f:
                    yield json.load(f)
        return self.import_data(json_stream())
    def _prepare_imports(self, dicts):
        """ filters the import stream to remove duplicates
        also serves as a good place to override if anything special has to be done to the
        order of the import stream (see OrganizationImporter)
        """
        # hash(json): id
        seen_hashes = {}
        for data in dicts:
            json_id = data.pop('_id')
            # map duplicates (using omnihash to tell if json dicts are identical-ish)
            objhash = omnihash(data)
            if objhash not in seen_hashes:
                seen_hashes[objhash] = json_id
                yield json_id, data
            else:
                self.duplicates[json_id] = seen_hashes[objhash]
    def import_data(self, data_items):
        """ import a bunch of dicts together """
        # keep counts of all actions
        record = {
            'insert': 0, 'update': 0, 'noop': 0,
            'start': utcnow(),
            'records': {
                'insert': [],
                'update': [],
                'noop': [],
            }
        }
        for json_id, data in self._prepare_imports(data_items):
            obj_id, what = self.import_item(data)
            self.json_to_db_id[json_id] = obj_id
            record['records'][what].append(obj_id)
            record[what] += 1
        # all objects are loaded, a perfect time to do inter-object resolution and other tasks
        self.postimport()
        record['end'] = utcnow()
        return {self._type: record}
    def import_item(self, data):
        """ function used by import_data """
        what = 'noop'
        # remove the JSON _id (may still be there if called directly)
        data.pop('_id', None)
        # add fields/etc.
        data = self.apply_transformers(data)
        data = self.prepare_for_db(data)
        try:
            obj = self.get_object(data)
        except self.model_class.DoesNotExist:
            obj = None
        # remove pupa_id which does not belong in the OCD data models
        pupa_id = data.pop('pupa_id', None)
        # pull related fields off
        related = {}
        for field in self.related_models:
            related[field] = data.pop(field)
        # obj existed, check if we need to do an update
        if obj:
            if obj.id in self.json_to_db_id.values():
                raise DuplicateItemError(data, obj, related.get('sources', []))
            # check base object for changes
            for key, value in data.items():
                if getattr(obj, key) != value and key not in obj.locked_fields:
                    setattr(obj, key, value)
                    what = 'update'
            updated = self._update_related(obj, related, self.related_models)
            if updated:
                what = 'update'
            if what == 'update':
                obj.save()
        # need to create the data
        else:
            what = 'insert'
            try:
                obj = self.model_class.objects.create(**data)
            except Exception as e:
                raise DataImportError('{} while importing {} as {}'.format(e, data,
                                                                           self.model_class))
            self._create_related(obj, related, self.related_models)
        if pupa_id:
            Identifier.objects.get_or_create(identifier=pupa_id,
                                             jurisdiction_id=self.jurisdiction_id,
                                             defaults={'content_object': obj})
        return obj.id, what
    def _update_related(self, obj, related, subfield_dict):
        """
        update DB objects related to a base object
        obj: a base object to create related
        related: dict mapping field names to lists of related objects
        subfield_dict: where to get the next layer of subfields
        """
        # keep track of whether or not anything was updated
        updated = False
        # for each related field - check if there are differences
        for field, items in related.items():
            # skip subitem check if it's locked anyway
            if field in obj.locked_fields:
                continue
            # get items from database
            dbitems = list(getattr(obj, field).all())
            dbitems_count = len(dbitems)
            # default to doing nothing
            do_delete = do_update = False
            if items and dbitems_count: # we have items, so does db, check for conflict
                do_delete = do_update = items_differ(items, dbitems, subfield_dict[field][2])
            elif items and not dbitems_count: # we have items, db doesn't, just update
                do_update = True
            elif not items and dbitems_count: # db has items, we don't, just delete
                do_delete = True
            # otherwise: no items or dbitems, so nothing is done
            # don't delete if field is in merge_related
            if field in self.merge_related:
                new_items = []
                # build a list of keyfields to existing database objects
                keylist = self.merge_related[field]
                keyed_dbitems = {tuple(getattr(item, k) for k in keylist):
                                 item for item in dbitems}
                # go through 'new' items
                # if item with the same keyfields exists:
                #     update the database item w/ the new item's properties
                # else:
                #     add it to new_items
                for item in items:
                    key = tuple(item.get(k) for k in keylist)
                    dbitem = keyed_dbitems.get(key)
                    if not dbitem:
                        new_items.append(item)
                    else:
                        # update dbitem
                        for fname, val in item.items():
                            setattr(dbitem, fname, val)
                        dbitem.save()
                # import anything that made it to new_items in the usual fashion
                self._create_related(obj, {field: new_items}, subfield_dict)
            else:
                # default logic is to just wipe and recreate subobjects
                if do_delete:
                    updated = True
                    getattr(obj, field).all().delete()
                if do_update:
                    updated = True
                    self._create_related(obj, {field: items}, subfield_dict)
        return updated
    def _create_related(self, obj, related, subfield_dict):
        """
        create DB objects related to a base object
        obj: a base object to create related
        related: dict mapping field names to lists of related objects
        subfield_dict: where to get the next layer of subfields
        """
        for field, items in related.items():
            subobjects = []
            all_subrelated = []
            Subtype, reverse_id_field, subsubdict = subfield_dict[field]
            for order, item in enumerate(items):
                # pull off 'subrelated' (things that are related to this obj)
                subrelated = {}
                for subfield in subsubdict:
                    subrelated[subfield] = item.pop(subfield)
                if field in self.preserve_order:
                    item['order'] = order
                item[reverse_id_field] = obj.id
                try:
                    subobjects.append(Subtype(**item))
                    all_subrelated.append(subrelated)
                except Exception as e:
                    raise DataImportError('{} while importing {} as {}'.format(e, item, Subtype))
            # add all subobjects at once (really great for actions & votes)
            try:
                Subtype.objects.bulk_create(subobjects)
            except Exception as e:
                raise DataImportError('{} while importing {} as {}'.format(e, subobjects, Subtype))
            # after import the subobjects, import their subsubobjects
            for subobj, subrel in zip(subobjects, all_subrelated):
                self._create_related(subobj, subrel, subsubdict)
    def lookup_obj_id(self, pupa_id, model):
        """Returns the object_id recorded for pupa_id in this jurisdiction, or None."""
        content_type = ContentType.objects.get_for_model(model)
        try:
            obj_id = Identifier.objects.get(identifier=pupa_id,
                                            content_type=content_type,
                                            jurisdiction_id=self.jurisdiction_id).object_id
        except Identifier.DoesNotExist:
            obj_id = None
        return obj_id
    def apply_transformers(self, data, transformers=None):
        """Applies configured transformers to matching keys of data, in place.
        A list applies each callable in order; a dict recurses into the
        nested mapping; anything else is called directly on the value.
        """
        if transformers is None:
            transformers = self.cached_transformers
        for key, key_transformers in transformers.items():
            if key not in data:
                continue
            if isinstance(key_transformers, list):
                for transformer in key_transformers:
                    data[key] = transformer(data[key])
            elif isinstance(key_transformers, dict):
                self.apply_transformers(data[key], key_transformers)
            else:
                data[key] = key_transformers(data[key])
        return data
    def get_seen_sessions(self):
        """Returns the DB ids of sessions resolved during this import run."""
        return self.session_cache.values()
|
opencivicdata/pupa | pupa/importers/base.py | items_differ | python | def items_differ(jsonitems, dbitems, subfield_dict):
# short circuit common cases
if len(jsonitems) == len(dbitems) == 0:
# both are empty
return False
elif len(jsonitems) != len(dbitems):
# if lengths differ, they're definitely different
return True
original_jsonitems = jsonitems
jsonitems = copy.deepcopy(jsonitems)
keys = jsonitems[0].keys()
# go over dbitems looking for matches
for dbitem in dbitems:
order = getattr(dbitem, 'order', None)
match = None
for i, jsonitem in enumerate(jsonitems):
# check if all keys (excluding subfields) match
for k in keys:
if k not in subfield_dict and getattr(dbitem, k) != jsonitem.get(k, None):
break
else:
# all fields match so far, possibly equal, just check subfields now
for k in subfield_dict:
jsonsubitems = jsonitem[k]
dbsubitems = list(getattr(dbitem, k).all())
if items_differ(jsonsubitems, dbsubitems, subfield_dict[k][2]):
break
else:
# if the dbitem sets 'order', then the order matters
if order is not None and int(order) != original_jsonitems.index(jsonitem):
break
# these items are equal, so let's mark it for removal
match = i
break
if match is not None:
# item exists in both, remove from jsonitems
jsonitems.pop(match)
else:
# exists in db but not json
return True
# if we get here, jsonitems has to be empty because we asserted that the length was
# the same and we found a match for each thing in dbitems, here's a safety check just in case
if jsonitems: # pragma: no cover
return True
return False | check whether or not jsonitems and dbitems differ | train | https://github.com/opencivicdata/pupa/blob/18e0ddc4344804987ee0f2227bf600375538dbd5/pupa/importers/base.py#L30-L81 | [
"def items_differ(jsonitems, dbitems, subfield_dict):\n \"\"\" check whether or not jsonitems and dbitems differ \"\"\"\n\n # short circuit common cases\n if len(jsonitems) == len(dbitems) == 0:\n # both are empty\n return False\n elif len(jsonitems) != len(dbitems):\n # if lengths ... | import os
import copy
import glob
import json
import logging
from django.db.models import Q
from django.contrib.contenttypes.models import ContentType
from opencivicdata.legislative.models import LegislativeSession
from pupa import settings
from pupa.exceptions import DuplicateItemError
from pupa.utils import get_pseudo_id, utcnow
from pupa.exceptions import UnresolvedIdError, DataImportError
from pupa.models import Identifier
def omnihash(obj):
    """Recursively hashes objects, including normally unhashable containers."""
    if isinstance(obj, dict):
        return hash(frozenset((key, omnihash(value)) for key, value in obj.items()))
    if isinstance(obj, (tuple, list)):
        return hash(tuple(omnihash(element) for element in obj))
    if isinstance(obj, set):
        return hash(frozenset(omnihash(element) for element in obj))
    # anything else is assumed to be natively hashable
    return hash(obj)
class BaseImporter(object):
    """ BaseImporter
    Imports a stream of scraped JSON dicts of a single type into the DB,
    deduplicating identical records and resolving scraper-assigned ids.
    Override:
        get_object(data)
        limit_spec(spec) [optional, required if pseudo_ids are used]
        prepare_for_db(data) [optional]
        postimport() [optional]
    """
    _type = None                # string name of the imported type (set by subclass)
    model_class = None          # Django model written by this importer
    related_models = {}         # field name -> (Subtype, reverse_id_field, nested dict)
    preserve_order = set()      # related fields whose item order is significant
    merge_related = {}          # field name -> key fields used to merge instead of wipe
    cached_transformers = {}    # per-type transformers loaded from settings
    def __init__(self, jurisdiction_id):
        """Sets up per-run caches and logging helpers for one jurisdiction."""
        self.jurisdiction_id = jurisdiction_id
        self.json_to_db_id = {}
        self.duplicates = {}
        self.pseudo_id_cache = {}
        self.session_cache = {}
        self.logger = logging.getLogger("pupa")
        self.info = self.logger.info
        self.debug = self.logger.debug
        self.warning = self.logger.warning
        self.error = self.logger.error
        self.critical = self.logger.critical
        # load transformers from appropriate setting
        if settings.IMPORT_TRANSFORMERS.get(self._type):
            self.cached_transformers = settings.IMPORT_TRANSFORMERS[self._type]
    def get_session_id(self, identifier):
        """Returns the DB id of the named legislative session, caching lookups."""
        if identifier not in self.session_cache:
            self.session_cache[identifier] = LegislativeSession.objects.get(
                identifier=identifier, jurisdiction_id=self.jurisdiction_id).id
        return self.session_cache[identifier]
    # no-op hooks meant to be overridden by subclasses
    def prepare_for_db(self, data):
        """Hook: adjust data just before it is written to the DB (default: no-op)."""
        return data
    def postimport(self):
        """Hook: runs once after every item has been imported (default: no-op)."""
        pass
    def resolve_json_id(self, json_id, allow_no_match=False):
        """
        Given an id found in scraped JSON, return a DB id for the object.
        params:
            json_id: id from json
            allow_no_match: just return None if id can't be resolved
        returns:
            database id
        raises:
            UnresolvedIdError if id couldn't be resolved
        """
        if not json_id:
            return None
        if json_id.startswith('~'):
            # keep caches of all the pseudo-ids to avoid doing 1000s of lookups during import
            if json_id not in self.pseudo_id_cache:
                spec = get_pseudo_id(json_id)
                spec = self.limit_spec(spec)
                if isinstance(spec, Q):
                    objects = self.model_class.objects.filter(spec)
                else:
                    objects = self.model_class.objects.filter(**spec)
                ids = {each.id for each in objects}
                if len(ids) == 1:
                    self.pseudo_id_cache[json_id] = ids.pop()
                    errmsg = None
                elif not ids:
                    errmsg = 'cannot resolve pseudo id to {}: {}'.format(
                        self.model_class.__name__, json_id)
                else:
                    errmsg = 'multiple objects returned for {} pseudo id {}: {}'.format(
                        self.model_class.__name__, json_id, ids)
                # either raise or log error
                if errmsg:
                    if not allow_no_match:
                        raise UnresolvedIdError(errmsg)
                    else:
                        self.error(errmsg)
                        self.pseudo_id_cache[json_id] = None
            # return the cached object
            return self.pseudo_id_cache[json_id]
        # get the id that the duplicate points to, or use self
        json_id = self.duplicates.get(json_id, json_id)
        try:
            return self.json_to_db_id[json_id]
        except KeyError:
            raise UnresolvedIdError('cannot resolve id: {}'.format(json_id))
    def import_directory(self, datadir):
        """ import a JSON directory into the database """
        def json_stream():
            # load all json, mapped by json_id
            for fname in glob.glob(os.path.join(datadir, self._type + '_*.json')):
                with open(fname) as f:
                    yield json.load(f)
        return self.import_data(json_stream())
    def _prepare_imports(self, dicts):
        """ filters the import stream to remove duplicates
        also serves as a good place to override if anything special has to be done to the
        order of the import stream (see OrganizationImporter)
        """
        # hash(json): id
        seen_hashes = {}
        for data in dicts:
            json_id = data.pop('_id')
            # map duplicates (using omnihash to tell if json dicts are identical-ish)
            objhash = omnihash(data)
            if objhash not in seen_hashes:
                seen_hashes[objhash] = json_id
                yield json_id, data
            else:
                self.duplicates[json_id] = seen_hashes[objhash]
    def import_data(self, data_items):
        """ import a bunch of dicts together """
        # keep counts of all actions
        record = {
            'insert': 0, 'update': 0, 'noop': 0,
            'start': utcnow(),
            'records': {
                'insert': [],
                'update': [],
                'noop': [],
            }
        }
        for json_id, data in self._prepare_imports(data_items):
            obj_id, what = self.import_item(data)
            self.json_to_db_id[json_id] = obj_id
            record['records'][what].append(obj_id)
            record[what] += 1
        # all objects are loaded, a perfect time to do inter-object resolution and other tasks
        self.postimport()
        record['end'] = utcnow()
        return {self._type: record}
    def import_item(self, data):
        """ function used by import_data """
        what = 'noop'
        # remove the JSON _id (may still be there if called directly)
        data.pop('_id', None)
        # add fields/etc.
        data = self.apply_transformers(data)
        data = self.prepare_for_db(data)
        try:
            obj = self.get_object(data)
        except self.model_class.DoesNotExist:
            obj = None
        # remove pupa_id which does not belong in the OCD data models
        pupa_id = data.pop('pupa_id', None)
        # pull related fields off
        related = {}
        for field in self.related_models:
            related[field] = data.pop(field)
        # obj existed, check if we need to do an update
        if obj:
            if obj.id in self.json_to_db_id.values():
                raise DuplicateItemError(data, obj, related.get('sources', []))
            # check base object for changes
            for key, value in data.items():
                if getattr(obj, key) != value and key not in obj.locked_fields:
                    setattr(obj, key, value)
                    what = 'update'
            updated = self._update_related(obj, related, self.related_models)
            if updated:
                what = 'update'
            if what == 'update':
                obj.save()
        # need to create the data
        else:
            what = 'insert'
            try:
                obj = self.model_class.objects.create(**data)
            except Exception as e:
                raise DataImportError('{} while importing {} as {}'.format(e, data,
                                                                           self.model_class))
            self._create_related(obj, related, self.related_models)
        if pupa_id:
            Identifier.objects.get_or_create(identifier=pupa_id,
                                             jurisdiction_id=self.jurisdiction_id,
                                             defaults={'content_object': obj})
        return obj.id, what
    def _update_related(self, obj, related, subfield_dict):
        """
        update DB objects related to a base object
        obj: a base object to create related
        related: dict mapping field names to lists of related objects
        subfield_dict: where to get the next layer of subfields
        """
        # keep track of whether or not anything was updated
        updated = False
        # for each related field - check if there are differences
        for field, items in related.items():
            # skip subitem check if it's locked anyway
            if field in obj.locked_fields:
                continue
            # get items from database
            dbitems = list(getattr(obj, field).all())
            dbitems_count = len(dbitems)
            # default to doing nothing
            do_delete = do_update = False
            if items and dbitems_count: # we have items, so does db, check for conflict
                do_delete = do_update = items_differ(items, dbitems, subfield_dict[field][2])
            elif items and not dbitems_count: # we have items, db doesn't, just update
                do_update = True
            elif not items and dbitems_count: # db has items, we don't, just delete
                do_delete = True
            # otherwise: no items or dbitems, so nothing is done
            # don't delete if field is in merge_related
            if field in self.merge_related:
                new_items = []
                # build a list of keyfields to existing database objects
                keylist = self.merge_related[field]
                keyed_dbitems = {tuple(getattr(item, k) for k in keylist):
                                 item for item in dbitems}
                # go through 'new' items
                # if item with the same keyfields exists:
                #     update the database item w/ the new item's properties
                # else:
                #     add it to new_items
                for item in items:
                    key = tuple(item.get(k) for k in keylist)
                    dbitem = keyed_dbitems.get(key)
                    if not dbitem:
                        new_items.append(item)
                    else:
                        # update dbitem
                        for fname, val in item.items():
                            setattr(dbitem, fname, val)
                        dbitem.save()
                # import anything that made it to new_items in the usual fashion
                self._create_related(obj, {field: new_items}, subfield_dict)
            else:
                # default logic is to just wipe and recreate subobjects
                if do_delete:
                    updated = True
                    getattr(obj, field).all().delete()
                if do_update:
                    updated = True
                    self._create_related(obj, {field: items}, subfield_dict)
        return updated
    def _create_related(self, obj, related, subfield_dict):
        """
        create DB objects related to a base object
        obj: a base object to create related
        related: dict mapping field names to lists of related objects
        subfield_dict: where to get the next layer of subfields
        """
        for field, items in related.items():
            subobjects = []
            all_subrelated = []
            Subtype, reverse_id_field, subsubdict = subfield_dict[field]
            for order, item in enumerate(items):
                # pull off 'subrelated' (things that are related to this obj)
                subrelated = {}
                for subfield in subsubdict:
                    subrelated[subfield] = item.pop(subfield)
                if field in self.preserve_order:
                    item['order'] = order
                item[reverse_id_field] = obj.id
                try:
                    subobjects.append(Subtype(**item))
                    all_subrelated.append(subrelated)
                except Exception as e:
                    raise DataImportError('{} while importing {} as {}'.format(e, item, Subtype))
            # add all subobjects at once (really great for actions & votes)
            try:
                Subtype.objects.bulk_create(subobjects)
            except Exception as e:
                raise DataImportError('{} while importing {} as {}'.format(e, subobjects, Subtype))
            # after import the subobjects, import their subsubobjects
            for subobj, subrel in zip(subobjects, all_subrelated):
                self._create_related(subobj, subrel, subsubdict)
    def lookup_obj_id(self, pupa_id, model):
        """Returns the object_id recorded for pupa_id in this jurisdiction, or None."""
        content_type = ContentType.objects.get_for_model(model)
        try:
            obj_id = Identifier.objects.get(identifier=pupa_id,
                                            content_type=content_type,
                                            jurisdiction_id=self.jurisdiction_id).object_id
        except Identifier.DoesNotExist:
            obj_id = None
        return obj_id
    def apply_transformers(self, data, transformers=None):
        """Applies configured transformers to matching keys of data, in place.
        A list applies each callable in order; a dict recurses into the
        nested mapping; anything else is called directly on the value.
        """
        if transformers is None:
            transformers = self.cached_transformers
        for key, key_transformers in transformers.items():
            if key not in data:
                continue
            if isinstance(key_transformers, list):
                for transformer in key_transformers:
                    data[key] = transformer(data[key])
            elif isinstance(key_transformers, dict):
                self.apply_transformers(data[key], key_transformers)
            else:
                data[key] = key_transformers(data[key])
        return data
    def get_seen_sessions(self):
        """Returns the DB ids of sessions resolved during this import run."""
        return self.session_cache.values()
|
opencivicdata/pupa | pupa/importers/base.py | BaseImporter.resolve_json_id | python | def resolve_json_id(self, json_id, allow_no_match=False):
if not json_id:
return None
if json_id.startswith('~'):
# keep caches of all the pseudo-ids to avoid doing 1000s of lookups during import
if json_id not in self.pseudo_id_cache:
spec = get_pseudo_id(json_id)
spec = self.limit_spec(spec)
if isinstance(spec, Q):
objects = self.model_class.objects.filter(spec)
else:
objects = self.model_class.objects.filter(**spec)
ids = {each.id for each in objects}
if len(ids) == 1:
self.pseudo_id_cache[json_id] = ids.pop()
errmsg = None
elif not ids:
errmsg = 'cannot resolve pseudo id to {}: {}'.format(
self.model_class.__name__, json_id)
else:
errmsg = 'multiple objects returned for {} pseudo id {}: {}'.format(
self.model_class.__name__, json_id, ids)
# either raise or log error
if errmsg:
if not allow_no_match:
raise UnresolvedIdError(errmsg)
else:
self.error(errmsg)
self.pseudo_id_cache[json_id] = None
# return the cached object
return self.pseudo_id_cache[json_id]
# get the id that the duplicate points to, or use self
json_id = self.duplicates.get(json_id, json_id)
try:
return self.json_to_db_id[json_id]
except KeyError:
raise UnresolvedIdError('cannot resolve id: {}'.format(json_id)) | Given an id found in scraped JSON, return a DB id for the object.
params:
json_id: id from json
allow_no_match: just return None if id can't be resolved
returns:
database id
raises:
ValueError if id couldn't be resolved | train | https://github.com/opencivicdata/pupa/blob/18e0ddc4344804987ee0f2227bf600375538dbd5/pupa/importers/base.py#L130-L185 | [
"def get_pseudo_id(pid):\n if pid[0] != '~':\n raise ValueError(\"pseudo id doesn't start with ~\")\n return json.loads(pid[1:])\n",
"def limit_spec(self, spec):\n if spec.get('classification') != 'party':\n spec['jurisdiction_id'] = self.jurisdiction_id\n\n name = spec.pop('name', None)... | class BaseImporter(object):
""" BaseImporter
Override:
get_object(data)
limit_spec(spec) [optional, required if pseudo_ids are used]
prepare_for_db(data) [optional]
postimport() [optional]
"""
_type = None
model_class = None
related_models = {}
preserve_order = set()
merge_related = {}
cached_transformers = {}
def __init__(self, jurisdiction_id):
self.jurisdiction_id = jurisdiction_id
self.json_to_db_id = {}
self.duplicates = {}
self.pseudo_id_cache = {}
self.session_cache = {}
self.logger = logging.getLogger("pupa")
self.info = self.logger.info
self.debug = self.logger.debug
self.warning = self.logger.warning
self.error = self.logger.error
self.critical = self.logger.critical
# load transformers from appropriate setting
if settings.IMPORT_TRANSFORMERS.get(self._type):
self.cached_transformers = settings.IMPORT_TRANSFORMERS[self._type]
def get_session_id(self, identifier):
if identifier not in self.session_cache:
self.session_cache[identifier] = LegislativeSession.objects.get(
identifier=identifier, jurisdiction_id=self.jurisdiction_id).id
return self.session_cache[identifier]
# no-ops to be overriden
def prepare_for_db(self, data):
return data
def postimport(self):
pass
def import_directory(self, datadir):
""" import a JSON directory into the database """
def json_stream():
# load all json, mapped by json_id
for fname in glob.glob(os.path.join(datadir, self._type + '_*.json')):
with open(fname) as f:
yield json.load(f)
return self.import_data(json_stream())
def _prepare_imports(self, dicts):
""" filters the import stream to remove duplicates
also serves as a good place to override if anything special has to be done to the
order of the import stream (see OrganizationImporter)
"""
# hash(json): id
seen_hashes = {}
for data in dicts:
json_id = data.pop('_id')
# map duplicates (using omnihash to tell if json dicts are identical-ish)
objhash = omnihash(data)
if objhash not in seen_hashes:
seen_hashes[objhash] = json_id
yield json_id, data
else:
self.duplicates[json_id] = seen_hashes[objhash]
def import_data(self, data_items):
""" import a bunch of dicts together """
# keep counts of all actions
record = {
'insert': 0, 'update': 0, 'noop': 0,
'start': utcnow(),
'records': {
'insert': [],
'update': [],
'noop': [],
}
}
for json_id, data in self._prepare_imports(data_items):
obj_id, what = self.import_item(data)
self.json_to_db_id[json_id] = obj_id
record['records'][what].append(obj_id)
record[what] += 1
# all objects are loaded, a perfect time to do inter-object resolution and other tasks
self.postimport()
record['end'] = utcnow()
return {self._type: record}
def import_item(self, data):
""" function used by import_data """
what = 'noop'
# remove the JSON _id (may still be there if called directly)
data.pop('_id', None)
# add fields/etc.
data = self.apply_transformers(data)
data = self.prepare_for_db(data)
try:
obj = self.get_object(data)
except self.model_class.DoesNotExist:
obj = None
# remove pupa_id which does not belong in the OCD data models
pupa_id = data.pop('pupa_id', None)
# pull related fields off
related = {}
for field in self.related_models:
related[field] = data.pop(field)
# obj existed, check if we need to do an update
if obj:
if obj.id in self.json_to_db_id.values():
raise DuplicateItemError(data, obj, related.get('sources', []))
# check base object for changes
for key, value in data.items():
if getattr(obj, key) != value and key not in obj.locked_fields:
setattr(obj, key, value)
what = 'update'
updated = self._update_related(obj, related, self.related_models)
if updated:
what = 'update'
if what == 'update':
obj.save()
# need to create the data
else:
what = 'insert'
try:
obj = self.model_class.objects.create(**data)
except Exception as e:
raise DataImportError('{} while importing {} as {}'.format(e, data,
self.model_class))
self._create_related(obj, related, self.related_models)
if pupa_id:
Identifier.objects.get_or_create(identifier=pupa_id,
jurisdiction_id=self.jurisdiction_id,
defaults={'content_object': obj})
return obj.id, what
def _update_related(self, obj, related, subfield_dict):
"""
update DB objects related to a base object
obj: a base object to create related
related: dict mapping field names to lists of related objects
subfield_list: where to get the next layer of subfields
"""
# keep track of whether or not anything was updated
updated = False
# for each related field - check if there are differences
for field, items in related.items():
# skip subitem check if it's locked anyway
if field in obj.locked_fields:
continue
# get items from database
dbitems = list(getattr(obj, field).all())
dbitems_count = len(dbitems)
# default to doing nothing
do_delete = do_update = False
if items and dbitems_count: # we have items, so does db, check for conflict
do_delete = do_update = items_differ(items, dbitems, subfield_dict[field][2])
elif items and not dbitems_count: # we have items, db doesn't, just update
do_update = True
elif not items and dbitems_count: # db has items, we don't, just delete
do_delete = True
# otherwise: no items or dbitems, so nothing is done
# don't delete if field is in merge_related
if field in self.merge_related:
new_items = []
# build a list of keyfields to existing database objects
keylist = self.merge_related[field]
keyed_dbitems = {tuple(getattr(item, k) for k in keylist):
item for item in dbitems}
# go through 'new' items
# if item with the same keyfields exists:
# update the database item w/ the new item's properties
# else:
# add it to new_items
for item in items:
key = tuple(item.get(k) for k in keylist)
dbitem = keyed_dbitems.get(key)
if not dbitem:
new_items.append(item)
else:
# update dbitem
for fname, val in item.items():
setattr(dbitem, fname, val)
dbitem.save()
# import anything that made it to new_items in the usual fashion
self._create_related(obj, {field: new_items}, subfield_dict)
else:
# default logic is to just wipe and recreate subobjects
if do_delete:
updated = True
getattr(obj, field).all().delete()
if do_update:
updated = True
self._create_related(obj, {field: items}, subfield_dict)
return updated
def _create_related(self, obj, related, subfield_dict):
"""
create DB objects related to a base object
obj: a base object to create related
related: dict mapping field names to lists of related objects
subfield_list: where to get the next layer of subfields
"""
for field, items in related.items():
subobjects = []
all_subrelated = []
Subtype, reverse_id_field, subsubdict = subfield_dict[field]
for order, item in enumerate(items):
# pull off 'subrelated' (things that are related to this obj)
subrelated = {}
for subfield in subsubdict:
subrelated[subfield] = item.pop(subfield)
if field in self.preserve_order:
item['order'] = order
item[reverse_id_field] = obj.id
try:
subobjects.append(Subtype(**item))
all_subrelated.append(subrelated)
except Exception as e:
raise DataImportError('{} while importing {} as {}'.format(e, item, Subtype))
# add all subobjects at once (really great for actions & votes)
try:
Subtype.objects.bulk_create(subobjects)
except Exception as e:
raise DataImportError('{} while importing {} as {}'.format(e, subobjects, Subtype))
# after import the subobjects, import their subsubobjects
for subobj, subrel in zip(subobjects, all_subrelated):
self._create_related(subobj, subrel, subsubdict)
def lookup_obj_id(self, pupa_id, model):
content_type = ContentType.objects.get_for_model(model)
try:
obj_id = Identifier.objects.get(identifier=pupa_id,
content_type=content_type,
jurisdiction_id=self.jurisdiction_id).object_id
except Identifier.DoesNotExist:
obj_id = None
return obj_id
def apply_transformers(self, data, transformers=None):
if transformers is None:
transformers = self.cached_transformers
for key, key_transformers in transformers.items():
if key not in data:
continue
if isinstance(key_transformers, list):
for transformer in key_transformers:
data[key] = transformer(data[key])
elif isinstance(key_transformers, dict):
self.apply_transformers(data[key], key_transformers)
else:
data[key] = key_transformers(data[key])
return data
def get_seen_sessions(self):
return self.session_cache.values()
|
opencivicdata/pupa | pupa/importers/base.py | BaseImporter.import_directory | python | def import_directory(self, datadir):
def json_stream():
# load all json, mapped by json_id
for fname in glob.glob(os.path.join(datadir, self._type + '_*.json')):
with open(fname) as f:
yield json.load(f)
return self.import_data(json_stream()) | import a JSON directory into the database | train | https://github.com/opencivicdata/pupa/blob/18e0ddc4344804987ee0f2227bf600375538dbd5/pupa/importers/base.py#L187-L196 | [
"def import_data(self, data_items):\n \"\"\" import a bunch of dicts together \"\"\"\n # keep counts of all actions\n record = {\n 'insert': 0, 'update': 0, 'noop': 0,\n 'start': utcnow(),\n 'records': {\n 'insert': [],\n 'update': [],\n 'noop': [],\n ... | class BaseImporter(object):
""" BaseImporter
Override:
get_object(data)
limit_spec(spec) [optional, required if pseudo_ids are used]
prepare_for_db(data) [optional]
postimport() [optional]
"""
_type = None
model_class = None
related_models = {}
preserve_order = set()
merge_related = {}
cached_transformers = {}
def __init__(self, jurisdiction_id):
self.jurisdiction_id = jurisdiction_id
self.json_to_db_id = {}
self.duplicates = {}
self.pseudo_id_cache = {}
self.session_cache = {}
self.logger = logging.getLogger("pupa")
self.info = self.logger.info
self.debug = self.logger.debug
self.warning = self.logger.warning
self.error = self.logger.error
self.critical = self.logger.critical
# load transformers from appropriate setting
if settings.IMPORT_TRANSFORMERS.get(self._type):
self.cached_transformers = settings.IMPORT_TRANSFORMERS[self._type]
def get_session_id(self, identifier):
if identifier not in self.session_cache:
self.session_cache[identifier] = LegislativeSession.objects.get(
identifier=identifier, jurisdiction_id=self.jurisdiction_id).id
return self.session_cache[identifier]
# no-ops to be overriden
def prepare_for_db(self, data):
return data
def postimport(self):
pass
def resolve_json_id(self, json_id, allow_no_match=False):
"""
Given an id found in scraped JSON, return a DB id for the object.
params:
json_id: id from json
allow_no_match: just return None if id can't be resolved
returns:
database id
raises:
ValueError if id couldn't be resolved
"""
if not json_id:
return None
if json_id.startswith('~'):
# keep caches of all the pseudo-ids to avoid doing 1000s of lookups during import
if json_id not in self.pseudo_id_cache:
spec = get_pseudo_id(json_id)
spec = self.limit_spec(spec)
if isinstance(spec, Q):
objects = self.model_class.objects.filter(spec)
else:
objects = self.model_class.objects.filter(**spec)
ids = {each.id for each in objects}
if len(ids) == 1:
self.pseudo_id_cache[json_id] = ids.pop()
errmsg = None
elif not ids:
errmsg = 'cannot resolve pseudo id to {}: {}'.format(
self.model_class.__name__, json_id)
else:
errmsg = 'multiple objects returned for {} pseudo id {}: {}'.format(
self.model_class.__name__, json_id, ids)
# either raise or log error
if errmsg:
if not allow_no_match:
raise UnresolvedIdError(errmsg)
else:
self.error(errmsg)
self.pseudo_id_cache[json_id] = None
# return the cached object
return self.pseudo_id_cache[json_id]
# get the id that the duplicate points to, or use self
json_id = self.duplicates.get(json_id, json_id)
try:
return self.json_to_db_id[json_id]
except KeyError:
raise UnresolvedIdError('cannot resolve id: {}'.format(json_id))
def _prepare_imports(self, dicts):
""" filters the import stream to remove duplicates
also serves as a good place to override if anything special has to be done to the
order of the import stream (see OrganizationImporter)
"""
# hash(json): id
seen_hashes = {}
for data in dicts:
json_id = data.pop('_id')
# map duplicates (using omnihash to tell if json dicts are identical-ish)
objhash = omnihash(data)
if objhash not in seen_hashes:
seen_hashes[objhash] = json_id
yield json_id, data
else:
self.duplicates[json_id] = seen_hashes[objhash]
def import_data(self, data_items):
""" import a bunch of dicts together """
# keep counts of all actions
record = {
'insert': 0, 'update': 0, 'noop': 0,
'start': utcnow(),
'records': {
'insert': [],
'update': [],
'noop': [],
}
}
for json_id, data in self._prepare_imports(data_items):
obj_id, what = self.import_item(data)
self.json_to_db_id[json_id] = obj_id
record['records'][what].append(obj_id)
record[what] += 1
# all objects are loaded, a perfect time to do inter-object resolution and other tasks
self.postimport()
record['end'] = utcnow()
return {self._type: record}
def import_item(self, data):
""" function used by import_data """
what = 'noop'
# remove the JSON _id (may still be there if called directly)
data.pop('_id', None)
# add fields/etc.
data = self.apply_transformers(data)
data = self.prepare_for_db(data)
try:
obj = self.get_object(data)
except self.model_class.DoesNotExist:
obj = None
# remove pupa_id which does not belong in the OCD data models
pupa_id = data.pop('pupa_id', None)
# pull related fields off
related = {}
for field in self.related_models:
related[field] = data.pop(field)
# obj existed, check if we need to do an update
if obj:
if obj.id in self.json_to_db_id.values():
raise DuplicateItemError(data, obj, related.get('sources', []))
# check base object for changes
for key, value in data.items():
if getattr(obj, key) != value and key not in obj.locked_fields:
setattr(obj, key, value)
what = 'update'
updated = self._update_related(obj, related, self.related_models)
if updated:
what = 'update'
if what == 'update':
obj.save()
# need to create the data
else:
what = 'insert'
try:
obj = self.model_class.objects.create(**data)
except Exception as e:
raise DataImportError('{} while importing {} as {}'.format(e, data,
self.model_class))
self._create_related(obj, related, self.related_models)
if pupa_id:
Identifier.objects.get_or_create(identifier=pupa_id,
jurisdiction_id=self.jurisdiction_id,
defaults={'content_object': obj})
return obj.id, what
def _update_related(self, obj, related, subfield_dict):
"""
update DB objects related to a base object
obj: a base object to create related
related: dict mapping field names to lists of related objects
subfield_list: where to get the next layer of subfields
"""
# keep track of whether or not anything was updated
updated = False
# for each related field - check if there are differences
for field, items in related.items():
# skip subitem check if it's locked anyway
if field in obj.locked_fields:
continue
# get items from database
dbitems = list(getattr(obj, field).all())
dbitems_count = len(dbitems)
# default to doing nothing
do_delete = do_update = False
if items and dbitems_count: # we have items, so does db, check for conflict
do_delete = do_update = items_differ(items, dbitems, subfield_dict[field][2])
elif items and not dbitems_count: # we have items, db doesn't, just update
do_update = True
elif not items and dbitems_count: # db has items, we don't, just delete
do_delete = True
# otherwise: no items or dbitems, so nothing is done
# don't delete if field is in merge_related
if field in self.merge_related:
new_items = []
# build a list of keyfields to existing database objects
keylist = self.merge_related[field]
keyed_dbitems = {tuple(getattr(item, k) for k in keylist):
item for item in dbitems}
# go through 'new' items
# if item with the same keyfields exists:
# update the database item w/ the new item's properties
# else:
# add it to new_items
for item in items:
key = tuple(item.get(k) for k in keylist)
dbitem = keyed_dbitems.get(key)
if not dbitem:
new_items.append(item)
else:
# update dbitem
for fname, val in item.items():
setattr(dbitem, fname, val)
dbitem.save()
# import anything that made it to new_items in the usual fashion
self._create_related(obj, {field: new_items}, subfield_dict)
else:
# default logic is to just wipe and recreate subobjects
if do_delete:
updated = True
getattr(obj, field).all().delete()
if do_update:
updated = True
self._create_related(obj, {field: items}, subfield_dict)
return updated
def _create_related(self, obj, related, subfield_dict):
"""
create DB objects related to a base object
obj: a base object to create related
related: dict mapping field names to lists of related objects
subfield_list: where to get the next layer of subfields
"""
for field, items in related.items():
subobjects = []
all_subrelated = []
Subtype, reverse_id_field, subsubdict = subfield_dict[field]
for order, item in enumerate(items):
# pull off 'subrelated' (things that are related to this obj)
subrelated = {}
for subfield in subsubdict:
subrelated[subfield] = item.pop(subfield)
if field in self.preserve_order:
item['order'] = order
item[reverse_id_field] = obj.id
try:
subobjects.append(Subtype(**item))
all_subrelated.append(subrelated)
except Exception as e:
raise DataImportError('{} while importing {} as {}'.format(e, item, Subtype))
# add all subobjects at once (really great for actions & votes)
try:
Subtype.objects.bulk_create(subobjects)
except Exception as e:
raise DataImportError('{} while importing {} as {}'.format(e, subobjects, Subtype))
# after import the subobjects, import their subsubobjects
for subobj, subrel in zip(subobjects, all_subrelated):
self._create_related(subobj, subrel, subsubdict)
def lookup_obj_id(self, pupa_id, model):
content_type = ContentType.objects.get_for_model(model)
try:
obj_id = Identifier.objects.get(identifier=pupa_id,
content_type=content_type,
jurisdiction_id=self.jurisdiction_id).object_id
except Identifier.DoesNotExist:
obj_id = None
return obj_id
def apply_transformers(self, data, transformers=None):
if transformers is None:
transformers = self.cached_transformers
for key, key_transformers in transformers.items():
if key not in data:
continue
if isinstance(key_transformers, list):
for transformer in key_transformers:
data[key] = transformer(data[key])
elif isinstance(key_transformers, dict):
self.apply_transformers(data[key], key_transformers)
else:
data[key] = key_transformers(data[key])
return data
def get_seen_sessions(self):
return self.session_cache.values()
|
opencivicdata/pupa | pupa/importers/base.py | BaseImporter._prepare_imports | python | def _prepare_imports(self, dicts):
# hash(json): id
seen_hashes = {}
for data in dicts:
json_id = data.pop('_id')
# map duplicates (using omnihash to tell if json dicts are identical-ish)
objhash = omnihash(data)
if objhash not in seen_hashes:
seen_hashes[objhash] = json_id
yield json_id, data
else:
self.duplicates[json_id] = seen_hashes[objhash] | filters the import stream to remove duplicates
also serves as a good place to override if anything special has to be done to the
order of the import stream (see OrganizationImporter) | train | https://github.com/opencivicdata/pupa/blob/18e0ddc4344804987ee0f2227bf600375538dbd5/pupa/importers/base.py#L198-L217 | [
"def omnihash(obj):\n \"\"\" recursively hash unhashable objects \"\"\"\n if isinstance(obj, set):\n return hash(frozenset(omnihash(e) for e in obj))\n elif isinstance(obj, (tuple, list)):\n return hash(tuple(omnihash(e) for e in obj))\n elif isinstance(obj, dict):\n return hash(fro... | class BaseImporter(object):
""" BaseImporter
Override:
get_object(data)
limit_spec(spec) [optional, required if pseudo_ids are used]
prepare_for_db(data) [optional]
postimport() [optional]
"""
_type = None
model_class = None
related_models = {}
preserve_order = set()
merge_related = {}
cached_transformers = {}
def __init__(self, jurisdiction_id):
self.jurisdiction_id = jurisdiction_id
self.json_to_db_id = {}
self.duplicates = {}
self.pseudo_id_cache = {}
self.session_cache = {}
self.logger = logging.getLogger("pupa")
self.info = self.logger.info
self.debug = self.logger.debug
self.warning = self.logger.warning
self.error = self.logger.error
self.critical = self.logger.critical
# load transformers from appropriate setting
if settings.IMPORT_TRANSFORMERS.get(self._type):
self.cached_transformers = settings.IMPORT_TRANSFORMERS[self._type]
def get_session_id(self, identifier):
if identifier not in self.session_cache:
self.session_cache[identifier] = LegislativeSession.objects.get(
identifier=identifier, jurisdiction_id=self.jurisdiction_id).id
return self.session_cache[identifier]
# no-ops to be overriden
def prepare_for_db(self, data):
return data
def postimport(self):
pass
def resolve_json_id(self, json_id, allow_no_match=False):
"""
Given an id found in scraped JSON, return a DB id for the object.
params:
json_id: id from json
allow_no_match: just return None if id can't be resolved
returns:
database id
raises:
ValueError if id couldn't be resolved
"""
if not json_id:
return None
if json_id.startswith('~'):
# keep caches of all the pseudo-ids to avoid doing 1000s of lookups during import
if json_id not in self.pseudo_id_cache:
spec = get_pseudo_id(json_id)
spec = self.limit_spec(spec)
if isinstance(spec, Q):
objects = self.model_class.objects.filter(spec)
else:
objects = self.model_class.objects.filter(**spec)
ids = {each.id for each in objects}
if len(ids) == 1:
self.pseudo_id_cache[json_id] = ids.pop()
errmsg = None
elif not ids:
errmsg = 'cannot resolve pseudo id to {}: {}'.format(
self.model_class.__name__, json_id)
else:
errmsg = 'multiple objects returned for {} pseudo id {}: {}'.format(
self.model_class.__name__, json_id, ids)
# either raise or log error
if errmsg:
if not allow_no_match:
raise UnresolvedIdError(errmsg)
else:
self.error(errmsg)
self.pseudo_id_cache[json_id] = None
# return the cached object
return self.pseudo_id_cache[json_id]
# get the id that the duplicate points to, or use self
json_id = self.duplicates.get(json_id, json_id)
try:
return self.json_to_db_id[json_id]
except KeyError:
raise UnresolvedIdError('cannot resolve id: {}'.format(json_id))
def import_directory(self, datadir):
""" import a JSON directory into the database """
def json_stream():
# load all json, mapped by json_id
for fname in glob.glob(os.path.join(datadir, self._type + '_*.json')):
with open(fname) as f:
yield json.load(f)
return self.import_data(json_stream())
def import_data(self, data_items):
""" import a bunch of dicts together """
# keep counts of all actions
record = {
'insert': 0, 'update': 0, 'noop': 0,
'start': utcnow(),
'records': {
'insert': [],
'update': [],
'noop': [],
}
}
for json_id, data in self._prepare_imports(data_items):
obj_id, what = self.import_item(data)
self.json_to_db_id[json_id] = obj_id
record['records'][what].append(obj_id)
record[what] += 1
# all objects are loaded, a perfect time to do inter-object resolution and other tasks
self.postimport()
record['end'] = utcnow()
return {self._type: record}
def import_item(self, data):
""" function used by import_data """
what = 'noop'
# remove the JSON _id (may still be there if called directly)
data.pop('_id', None)
# add fields/etc.
data = self.apply_transformers(data)
data = self.prepare_for_db(data)
try:
obj = self.get_object(data)
except self.model_class.DoesNotExist:
obj = None
# remove pupa_id which does not belong in the OCD data models
pupa_id = data.pop('pupa_id', None)
# pull related fields off
related = {}
for field in self.related_models:
related[field] = data.pop(field)
# obj existed, check if we need to do an update
if obj:
if obj.id in self.json_to_db_id.values():
raise DuplicateItemError(data, obj, related.get('sources', []))
# check base object for changes
for key, value in data.items():
if getattr(obj, key) != value and key not in obj.locked_fields:
setattr(obj, key, value)
what = 'update'
updated = self._update_related(obj, related, self.related_models)
if updated:
what = 'update'
if what == 'update':
obj.save()
# need to create the data
else:
what = 'insert'
try:
obj = self.model_class.objects.create(**data)
except Exception as e:
raise DataImportError('{} while importing {} as {}'.format(e, data,
self.model_class))
self._create_related(obj, related, self.related_models)
if pupa_id:
Identifier.objects.get_or_create(identifier=pupa_id,
jurisdiction_id=self.jurisdiction_id,
defaults={'content_object': obj})
return obj.id, what
def _update_related(self, obj, related, subfield_dict):
"""
update DB objects related to a base object
obj: a base object to create related
related: dict mapping field names to lists of related objects
subfield_list: where to get the next layer of subfields
"""
# keep track of whether or not anything was updated
updated = False
# for each related field - check if there are differences
for field, items in related.items():
# skip subitem check if it's locked anyway
if field in obj.locked_fields:
continue
# get items from database
dbitems = list(getattr(obj, field).all())
dbitems_count = len(dbitems)
# default to doing nothing
do_delete = do_update = False
if items and dbitems_count: # we have items, so does db, check for conflict
do_delete = do_update = items_differ(items, dbitems, subfield_dict[field][2])
elif items and not dbitems_count: # we have items, db doesn't, just update
do_update = True
elif not items and dbitems_count: # db has items, we don't, just delete
do_delete = True
# otherwise: no items or dbitems, so nothing is done
# don't delete if field is in merge_related
if field in self.merge_related:
new_items = []
# build a list of keyfields to existing database objects
keylist = self.merge_related[field]
keyed_dbitems = {tuple(getattr(item, k) for k in keylist):
item for item in dbitems}
# go through 'new' items
# if item with the same keyfields exists:
# update the database item w/ the new item's properties
# else:
# add it to new_items
for item in items:
key = tuple(item.get(k) for k in keylist)
dbitem = keyed_dbitems.get(key)
if not dbitem:
new_items.append(item)
else:
# update dbitem
for fname, val in item.items():
setattr(dbitem, fname, val)
dbitem.save()
# import anything that made it to new_items in the usual fashion
self._create_related(obj, {field: new_items}, subfield_dict)
else:
# default logic is to just wipe and recreate subobjects
if do_delete:
updated = True
getattr(obj, field).all().delete()
if do_update:
updated = True
self._create_related(obj, {field: items}, subfield_dict)
return updated
def _create_related(self, obj, related, subfield_dict):
"""
create DB objects related to a base object
obj: a base object to create related
related: dict mapping field names to lists of related objects
subfield_list: where to get the next layer of subfields
"""
for field, items in related.items():
subobjects = []
all_subrelated = []
Subtype, reverse_id_field, subsubdict = subfield_dict[field]
for order, item in enumerate(items):
# pull off 'subrelated' (things that are related to this obj)
subrelated = {}
for subfield in subsubdict:
subrelated[subfield] = item.pop(subfield)
if field in self.preserve_order:
item['order'] = order
item[reverse_id_field] = obj.id
try:
subobjects.append(Subtype(**item))
all_subrelated.append(subrelated)
except Exception as e:
raise DataImportError('{} while importing {} as {}'.format(e, item, Subtype))
# add all subobjects at once (really great for actions & votes)
try:
Subtype.objects.bulk_create(subobjects)
except Exception as e:
raise DataImportError('{} while importing {} as {}'.format(e, subobjects, Subtype))
# after import the subobjects, import their subsubobjects
for subobj, subrel in zip(subobjects, all_subrelated):
self._create_related(subobj, subrel, subsubdict)
def lookup_obj_id(self, pupa_id, model):
content_type = ContentType.objects.get_for_model(model)
try:
obj_id = Identifier.objects.get(identifier=pupa_id,
content_type=content_type,
jurisdiction_id=self.jurisdiction_id).object_id
except Identifier.DoesNotExist:
obj_id = None
return obj_id
def apply_transformers(self, data, transformers=None):
if transformers is None:
transformers = self.cached_transformers
for key, key_transformers in transformers.items():
if key not in data:
continue
if isinstance(key_transformers, list):
for transformer in key_transformers:
data[key] = transformer(data[key])
elif isinstance(key_transformers, dict):
self.apply_transformers(data[key], key_transformers)
else:
data[key] = key_transformers(data[key])
return data
def get_seen_sessions(self):
return self.session_cache.values()
|
opencivicdata/pupa | pupa/importers/base.py | BaseImporter.import_data | python | def import_data(self, data_items):
# keep counts of all actions
record = {
'insert': 0, 'update': 0, 'noop': 0,
'start': utcnow(),
'records': {
'insert': [],
'update': [],
'noop': [],
}
}
for json_id, data in self._prepare_imports(data_items):
obj_id, what = self.import_item(data)
self.json_to_db_id[json_id] = obj_id
record['records'][what].append(obj_id)
record[what] += 1
# all objects are loaded, a perfect time to do inter-object resolution and other tasks
self.postimport()
record['end'] = utcnow()
return {self._type: record} | import a bunch of dicts together | train | https://github.com/opencivicdata/pupa/blob/18e0ddc4344804987ee0f2227bf600375538dbd5/pupa/importers/base.py#L219-L243 | [
"def utcnow():\n return datetime.datetime.now(datetime.timezone.utc)\n",
"def postimport(self):\n pass\n",
"def _prepare_imports(self, dicts):\n\n \"\"\" filters the import stream to remove duplicates\n\n also serves as a good place to override if anything special has to be done to the\n order of... | class BaseImporter(object):
""" BaseImporter
Override:
get_object(data)
limit_spec(spec) [optional, required if pseudo_ids are used]
prepare_for_db(data) [optional]
postimport() [optional]
"""
_type = None
model_class = None
related_models = {}
preserve_order = set()
merge_related = {}
cached_transformers = {}
def __init__(self, jurisdiction_id):
self.jurisdiction_id = jurisdiction_id
self.json_to_db_id = {}
self.duplicates = {}
self.pseudo_id_cache = {}
self.session_cache = {}
self.logger = logging.getLogger("pupa")
self.info = self.logger.info
self.debug = self.logger.debug
self.warning = self.logger.warning
self.error = self.logger.error
self.critical = self.logger.critical
# load transformers from appropriate setting
if settings.IMPORT_TRANSFORMERS.get(self._type):
self.cached_transformers = settings.IMPORT_TRANSFORMERS[self._type]
def get_session_id(self, identifier):
if identifier not in self.session_cache:
self.session_cache[identifier] = LegislativeSession.objects.get(
identifier=identifier, jurisdiction_id=self.jurisdiction_id).id
return self.session_cache[identifier]
# no-ops to be overriden
def prepare_for_db(self, data):
return data
def postimport(self):
pass
def resolve_json_id(self, json_id, allow_no_match=False):
"""
Given an id found in scraped JSON, return a DB id for the object.
params:
json_id: id from json
allow_no_match: just return None if id can't be resolved
returns:
database id
raises:
ValueError if id couldn't be resolved
"""
if not json_id:
return None
if json_id.startswith('~'):
# keep caches of all the pseudo-ids to avoid doing 1000s of lookups during import
if json_id not in self.pseudo_id_cache:
spec = get_pseudo_id(json_id)
spec = self.limit_spec(spec)
if isinstance(spec, Q):
objects = self.model_class.objects.filter(spec)
else:
objects = self.model_class.objects.filter(**spec)
ids = {each.id for each in objects}
if len(ids) == 1:
self.pseudo_id_cache[json_id] = ids.pop()
errmsg = None
elif not ids:
errmsg = 'cannot resolve pseudo id to {}: {}'.format(
self.model_class.__name__, json_id)
else:
errmsg = 'multiple objects returned for {} pseudo id {}: {}'.format(
self.model_class.__name__, json_id, ids)
# either raise or log error
if errmsg:
if not allow_no_match:
raise UnresolvedIdError(errmsg)
else:
self.error(errmsg)
self.pseudo_id_cache[json_id] = None
# return the cached object
return self.pseudo_id_cache[json_id]
# get the id that the duplicate points to, or use self
json_id = self.duplicates.get(json_id, json_id)
try:
return self.json_to_db_id[json_id]
except KeyError:
raise UnresolvedIdError('cannot resolve id: {}'.format(json_id))
def import_directory(self, datadir):
""" import a JSON directory into the database """
def json_stream():
# load all json, mapped by json_id
for fname in glob.glob(os.path.join(datadir, self._type + '_*.json')):
with open(fname) as f:
yield json.load(f)
return self.import_data(json_stream())
def _prepare_imports(self, dicts):
""" filters the import stream to remove duplicates
also serves as a good place to override if anything special has to be done to the
order of the import stream (see OrganizationImporter)
"""
# hash(json): id
seen_hashes = {}
for data in dicts:
json_id = data.pop('_id')
# map duplicates (using omnihash to tell if json dicts are identical-ish)
objhash = omnihash(data)
if objhash not in seen_hashes:
seen_hashes[objhash] = json_id
yield json_id, data
else:
self.duplicates[json_id] = seen_hashes[objhash]
def import_item(self, data):
""" function used by import_data """
what = 'noop'
# remove the JSON _id (may still be there if called directly)
data.pop('_id', None)
# add fields/etc.
data = self.apply_transformers(data)
data = self.prepare_for_db(data)
try:
obj = self.get_object(data)
except self.model_class.DoesNotExist:
obj = None
# remove pupa_id which does not belong in the OCD data models
pupa_id = data.pop('pupa_id', None)
# pull related fields off
related = {}
for field in self.related_models:
related[field] = data.pop(field)
# obj existed, check if we need to do an update
if obj:
if obj.id in self.json_to_db_id.values():
raise DuplicateItemError(data, obj, related.get('sources', []))
# check base object for changes
for key, value in data.items():
if getattr(obj, key) != value and key not in obj.locked_fields:
setattr(obj, key, value)
what = 'update'
updated = self._update_related(obj, related, self.related_models)
if updated:
what = 'update'
if what == 'update':
obj.save()
# need to create the data
else:
what = 'insert'
try:
obj = self.model_class.objects.create(**data)
except Exception as e:
raise DataImportError('{} while importing {} as {}'.format(e, data,
self.model_class))
self._create_related(obj, related, self.related_models)
if pupa_id:
Identifier.objects.get_or_create(identifier=pupa_id,
jurisdiction_id=self.jurisdiction_id,
defaults={'content_object': obj})
return obj.id, what
def _update_related(self, obj, related, subfield_dict):
"""
update DB objects related to a base object
obj: a base object to create related
related: dict mapping field names to lists of related objects
subfield_list: where to get the next layer of subfields
"""
# keep track of whether or not anything was updated
updated = False
# for each related field - check if there are differences
for field, items in related.items():
# skip subitem check if it's locked anyway
if field in obj.locked_fields:
continue
# get items from database
dbitems = list(getattr(obj, field).all())
dbitems_count = len(dbitems)
# default to doing nothing
do_delete = do_update = False
if items and dbitems_count: # we have items, so does db, check for conflict
do_delete = do_update = items_differ(items, dbitems, subfield_dict[field][2])
elif items and not dbitems_count: # we have items, db doesn't, just update
do_update = True
elif not items and dbitems_count: # db has items, we don't, just delete
do_delete = True
# otherwise: no items or dbitems, so nothing is done
# don't delete if field is in merge_related
if field in self.merge_related:
new_items = []
# build a list of keyfields to existing database objects
keylist = self.merge_related[field]
keyed_dbitems = {tuple(getattr(item, k) for k in keylist):
item for item in dbitems}
# go through 'new' items
# if item with the same keyfields exists:
# update the database item w/ the new item's properties
# else:
# add it to new_items
for item in items:
key = tuple(item.get(k) for k in keylist)
dbitem = keyed_dbitems.get(key)
if not dbitem:
new_items.append(item)
else:
# update dbitem
for fname, val in item.items():
setattr(dbitem, fname, val)
dbitem.save()
# import anything that made it to new_items in the usual fashion
self._create_related(obj, {field: new_items}, subfield_dict)
else:
# default logic is to just wipe and recreate subobjects
if do_delete:
updated = True
getattr(obj, field).all().delete()
if do_update:
updated = True
self._create_related(obj, {field: items}, subfield_dict)
return updated
def _create_related(self, obj, related, subfield_dict):
"""
create DB objects related to a base object
obj: a base object to create related
related: dict mapping field names to lists of related objects
subfield_list: where to get the next layer of subfields
"""
for field, items in related.items():
subobjects = []
all_subrelated = []
Subtype, reverse_id_field, subsubdict = subfield_dict[field]
for order, item in enumerate(items):
# pull off 'subrelated' (things that are related to this obj)
subrelated = {}
for subfield in subsubdict:
subrelated[subfield] = item.pop(subfield)
if field in self.preserve_order:
item['order'] = order
item[reverse_id_field] = obj.id
try:
subobjects.append(Subtype(**item))
all_subrelated.append(subrelated)
except Exception as e:
raise DataImportError('{} while importing {} as {}'.format(e, item, Subtype))
# add all subobjects at once (really great for actions & votes)
try:
Subtype.objects.bulk_create(subobjects)
except Exception as e:
raise DataImportError('{} while importing {} as {}'.format(e, subobjects, Subtype))
# after import the subobjects, import their subsubobjects
for subobj, subrel in zip(subobjects, all_subrelated):
self._create_related(subobj, subrel, subsubdict)
def lookup_obj_id(self, pupa_id, model):
content_type = ContentType.objects.get_for_model(model)
try:
obj_id = Identifier.objects.get(identifier=pupa_id,
content_type=content_type,
jurisdiction_id=self.jurisdiction_id).object_id
except Identifier.DoesNotExist:
obj_id = None
return obj_id
def apply_transformers(self, data, transformers=None):
if transformers is None:
transformers = self.cached_transformers
for key, key_transformers in transformers.items():
if key not in data:
continue
if isinstance(key_transformers, list):
for transformer in key_transformers:
data[key] = transformer(data[key])
elif isinstance(key_transformers, dict):
self.apply_transformers(data[key], key_transformers)
else:
data[key] = key_transformers(data[key])
return data
def get_seen_sessions(self):
return self.session_cache.values()
|
opencivicdata/pupa | pupa/importers/base.py | BaseImporter.import_item | python | def import_item(self, data):
what = 'noop'
# remove the JSON _id (may still be there if called directly)
data.pop('_id', None)
# add fields/etc.
data = self.apply_transformers(data)
data = self.prepare_for_db(data)
try:
obj = self.get_object(data)
except self.model_class.DoesNotExist:
obj = None
# remove pupa_id which does not belong in the OCD data models
pupa_id = data.pop('pupa_id', None)
# pull related fields off
related = {}
for field in self.related_models:
related[field] = data.pop(field)
# obj existed, check if we need to do an update
if obj:
if obj.id in self.json_to_db_id.values():
raise DuplicateItemError(data, obj, related.get('sources', []))
# check base object for changes
for key, value in data.items():
if getattr(obj, key) != value and key not in obj.locked_fields:
setattr(obj, key, value)
what = 'update'
updated = self._update_related(obj, related, self.related_models)
if updated:
what = 'update'
if what == 'update':
obj.save()
# need to create the data
else:
what = 'insert'
try:
obj = self.model_class.objects.create(**data)
except Exception as e:
raise DataImportError('{} while importing {} as {}'.format(e, data,
self.model_class))
self._create_related(obj, related, self.related_models)
if pupa_id:
Identifier.objects.get_or_create(identifier=pupa_id,
jurisdiction_id=self.jurisdiction_id,
defaults={'content_object': obj})
return obj.id, what | function used by import_data | train | https://github.com/opencivicdata/pupa/blob/18e0ddc4344804987ee0f2227bf600375538dbd5/pupa/importers/base.py#L245-L301 | [
"def prepare_for_db(self, data):\n return data\n",
"def _create_related(self, obj, related, subfield_dict):\n \"\"\"\n create DB objects related to a base object\n obj: a base object to create related\n related: dict mapping field names to lists of related objects\n ... | class BaseImporter(object):
""" BaseImporter
Override:
get_object(data)
limit_spec(spec) [optional, required if pseudo_ids are used]
prepare_for_db(data) [optional]
postimport() [optional]
"""
_type = None
model_class = None
related_models = {}
preserve_order = set()
merge_related = {}
cached_transformers = {}
def __init__(self, jurisdiction_id):
self.jurisdiction_id = jurisdiction_id
self.json_to_db_id = {}
self.duplicates = {}
self.pseudo_id_cache = {}
self.session_cache = {}
self.logger = logging.getLogger("pupa")
self.info = self.logger.info
self.debug = self.logger.debug
self.warning = self.logger.warning
self.error = self.logger.error
self.critical = self.logger.critical
# load transformers from appropriate setting
if settings.IMPORT_TRANSFORMERS.get(self._type):
self.cached_transformers = settings.IMPORT_TRANSFORMERS[self._type]
def get_session_id(self, identifier):
if identifier not in self.session_cache:
self.session_cache[identifier] = LegislativeSession.objects.get(
identifier=identifier, jurisdiction_id=self.jurisdiction_id).id
return self.session_cache[identifier]
# no-ops to be overriden
def prepare_for_db(self, data):
return data
def postimport(self):
pass
def resolve_json_id(self, json_id, allow_no_match=False):
"""
Given an id found in scraped JSON, return a DB id for the object.
params:
json_id: id from json
allow_no_match: just return None if id can't be resolved
returns:
database id
raises:
ValueError if id couldn't be resolved
"""
if not json_id:
return None
if json_id.startswith('~'):
# keep caches of all the pseudo-ids to avoid doing 1000s of lookups during import
if json_id not in self.pseudo_id_cache:
spec = get_pseudo_id(json_id)
spec = self.limit_spec(spec)
if isinstance(spec, Q):
objects = self.model_class.objects.filter(spec)
else:
objects = self.model_class.objects.filter(**spec)
ids = {each.id for each in objects}
if len(ids) == 1:
self.pseudo_id_cache[json_id] = ids.pop()
errmsg = None
elif not ids:
errmsg = 'cannot resolve pseudo id to {}: {}'.format(
self.model_class.__name__, json_id)
else:
errmsg = 'multiple objects returned for {} pseudo id {}: {}'.format(
self.model_class.__name__, json_id, ids)
# either raise or log error
if errmsg:
if not allow_no_match:
raise UnresolvedIdError(errmsg)
else:
self.error(errmsg)
self.pseudo_id_cache[json_id] = None
# return the cached object
return self.pseudo_id_cache[json_id]
# get the id that the duplicate points to, or use self
json_id = self.duplicates.get(json_id, json_id)
try:
return self.json_to_db_id[json_id]
except KeyError:
raise UnresolvedIdError('cannot resolve id: {}'.format(json_id))
def import_directory(self, datadir):
""" import a JSON directory into the database """
def json_stream():
# load all json, mapped by json_id
for fname in glob.glob(os.path.join(datadir, self._type + '_*.json')):
with open(fname) as f:
yield json.load(f)
return self.import_data(json_stream())
def _prepare_imports(self, dicts):
""" filters the import stream to remove duplicates
also serves as a good place to override if anything special has to be done to the
order of the import stream (see OrganizationImporter)
"""
# hash(json): id
seen_hashes = {}
for data in dicts:
json_id = data.pop('_id')
# map duplicates (using omnihash to tell if json dicts are identical-ish)
objhash = omnihash(data)
if objhash not in seen_hashes:
seen_hashes[objhash] = json_id
yield json_id, data
else:
self.duplicates[json_id] = seen_hashes[objhash]
def import_data(self, data_items):
""" import a bunch of dicts together """
# keep counts of all actions
record = {
'insert': 0, 'update': 0, 'noop': 0,
'start': utcnow(),
'records': {
'insert': [],
'update': [],
'noop': [],
}
}
for json_id, data in self._prepare_imports(data_items):
obj_id, what = self.import_item(data)
self.json_to_db_id[json_id] = obj_id
record['records'][what].append(obj_id)
record[what] += 1
# all objects are loaded, a perfect time to do inter-object resolution and other tasks
self.postimport()
record['end'] = utcnow()
return {self._type: record}
def _update_related(self, obj, related, subfield_dict):
"""
update DB objects related to a base object
obj: a base object to create related
related: dict mapping field names to lists of related objects
subfield_list: where to get the next layer of subfields
"""
# keep track of whether or not anything was updated
updated = False
# for each related field - check if there are differences
for field, items in related.items():
# skip subitem check if it's locked anyway
if field in obj.locked_fields:
continue
# get items from database
dbitems = list(getattr(obj, field).all())
dbitems_count = len(dbitems)
# default to doing nothing
do_delete = do_update = False
if items and dbitems_count: # we have items, so does db, check for conflict
do_delete = do_update = items_differ(items, dbitems, subfield_dict[field][2])
elif items and not dbitems_count: # we have items, db doesn't, just update
do_update = True
elif not items and dbitems_count: # db has items, we don't, just delete
do_delete = True
# otherwise: no items or dbitems, so nothing is done
# don't delete if field is in merge_related
if field in self.merge_related:
new_items = []
# build a list of keyfields to existing database objects
keylist = self.merge_related[field]
keyed_dbitems = {tuple(getattr(item, k) for k in keylist):
item for item in dbitems}
# go through 'new' items
# if item with the same keyfields exists:
# update the database item w/ the new item's properties
# else:
# add it to new_items
for item in items:
key = tuple(item.get(k) for k in keylist)
dbitem = keyed_dbitems.get(key)
if not dbitem:
new_items.append(item)
else:
# update dbitem
for fname, val in item.items():
setattr(dbitem, fname, val)
dbitem.save()
# import anything that made it to new_items in the usual fashion
self._create_related(obj, {field: new_items}, subfield_dict)
else:
# default logic is to just wipe and recreate subobjects
if do_delete:
updated = True
getattr(obj, field).all().delete()
if do_update:
updated = True
self._create_related(obj, {field: items}, subfield_dict)
return updated
def _create_related(self, obj, related, subfield_dict):
"""
create DB objects related to a base object
obj: a base object to create related
related: dict mapping field names to lists of related objects
subfield_list: where to get the next layer of subfields
"""
for field, items in related.items():
subobjects = []
all_subrelated = []
Subtype, reverse_id_field, subsubdict = subfield_dict[field]
for order, item in enumerate(items):
# pull off 'subrelated' (things that are related to this obj)
subrelated = {}
for subfield in subsubdict:
subrelated[subfield] = item.pop(subfield)
if field in self.preserve_order:
item['order'] = order
item[reverse_id_field] = obj.id
try:
subobjects.append(Subtype(**item))
all_subrelated.append(subrelated)
except Exception as e:
raise DataImportError('{} while importing {} as {}'.format(e, item, Subtype))
# add all subobjects at once (really great for actions & votes)
try:
Subtype.objects.bulk_create(subobjects)
except Exception as e:
raise DataImportError('{} while importing {} as {}'.format(e, subobjects, Subtype))
# after import the subobjects, import their subsubobjects
for subobj, subrel in zip(subobjects, all_subrelated):
self._create_related(subobj, subrel, subsubdict)
def lookup_obj_id(self, pupa_id, model):
content_type = ContentType.objects.get_for_model(model)
try:
obj_id = Identifier.objects.get(identifier=pupa_id,
content_type=content_type,
jurisdiction_id=self.jurisdiction_id).object_id
except Identifier.DoesNotExist:
obj_id = None
return obj_id
def apply_transformers(self, data, transformers=None):
if transformers is None:
transformers = self.cached_transformers
for key, key_transformers in transformers.items():
if key not in data:
continue
if isinstance(key_transformers, list):
for transformer in key_transformers:
data[key] = transformer(data[key])
elif isinstance(key_transformers, dict):
self.apply_transformers(data[key], key_transformers)
else:
data[key] = key_transformers(data[key])
return data
def get_seen_sessions(self):
return self.session_cache.values()
|
opencivicdata/pupa | pupa/importers/base.py | BaseImporter._update_related | python | def _update_related(self, obj, related, subfield_dict):
# keep track of whether or not anything was updated
updated = False
# for each related field - check if there are differences
for field, items in related.items():
# skip subitem check if it's locked anyway
if field in obj.locked_fields:
continue
# get items from database
dbitems = list(getattr(obj, field).all())
dbitems_count = len(dbitems)
# default to doing nothing
do_delete = do_update = False
if items and dbitems_count: # we have items, so does db, check for conflict
do_delete = do_update = items_differ(items, dbitems, subfield_dict[field][2])
elif items and not dbitems_count: # we have items, db doesn't, just update
do_update = True
elif not items and dbitems_count: # db has items, we don't, just delete
do_delete = True
# otherwise: no items or dbitems, so nothing is done
# don't delete if field is in merge_related
if field in self.merge_related:
new_items = []
# build a list of keyfields to existing database objects
keylist = self.merge_related[field]
keyed_dbitems = {tuple(getattr(item, k) for k in keylist):
item for item in dbitems}
# go through 'new' items
# if item with the same keyfields exists:
# update the database item w/ the new item's properties
# else:
# add it to new_items
for item in items:
key = tuple(item.get(k) for k in keylist)
dbitem = keyed_dbitems.get(key)
if not dbitem:
new_items.append(item)
else:
# update dbitem
for fname, val in item.items():
setattr(dbitem, fname, val)
dbitem.save()
# import anything that made it to new_items in the usual fashion
self._create_related(obj, {field: new_items}, subfield_dict)
else:
# default logic is to just wipe and recreate subobjects
if do_delete:
updated = True
getattr(obj, field).all().delete()
if do_update:
updated = True
self._create_related(obj, {field: items}, subfield_dict)
return updated | update DB objects related to a base object
obj: a base object to create related
related: dict mapping field names to lists of related objects
subfield_list: where to get the next layer of subfields | train | https://github.com/opencivicdata/pupa/blob/18e0ddc4344804987ee0f2227bf600375538dbd5/pupa/importers/base.py#L303-L369 | null | class BaseImporter(object):
""" BaseImporter
Override:
get_object(data)
limit_spec(spec) [optional, required if pseudo_ids are used]
prepare_for_db(data) [optional]
postimport() [optional]
"""
_type = None
model_class = None
related_models = {}
preserve_order = set()
merge_related = {}
cached_transformers = {}
def __init__(self, jurisdiction_id):
self.jurisdiction_id = jurisdiction_id
self.json_to_db_id = {}
self.duplicates = {}
self.pseudo_id_cache = {}
self.session_cache = {}
self.logger = logging.getLogger("pupa")
self.info = self.logger.info
self.debug = self.logger.debug
self.warning = self.logger.warning
self.error = self.logger.error
self.critical = self.logger.critical
# load transformers from appropriate setting
if settings.IMPORT_TRANSFORMERS.get(self._type):
self.cached_transformers = settings.IMPORT_TRANSFORMERS[self._type]
def get_session_id(self, identifier):
if identifier not in self.session_cache:
self.session_cache[identifier] = LegislativeSession.objects.get(
identifier=identifier, jurisdiction_id=self.jurisdiction_id).id
return self.session_cache[identifier]
# no-ops to be overriden
def prepare_for_db(self, data):
return data
def postimport(self):
pass
def resolve_json_id(self, json_id, allow_no_match=False):
"""
Given an id found in scraped JSON, return a DB id for the object.
params:
json_id: id from json
allow_no_match: just return None if id can't be resolved
returns:
database id
raises:
ValueError if id couldn't be resolved
"""
if not json_id:
return None
if json_id.startswith('~'):
# keep caches of all the pseudo-ids to avoid doing 1000s of lookups during import
if json_id not in self.pseudo_id_cache:
spec = get_pseudo_id(json_id)
spec = self.limit_spec(spec)
if isinstance(spec, Q):
objects = self.model_class.objects.filter(spec)
else:
objects = self.model_class.objects.filter(**spec)
ids = {each.id for each in objects}
if len(ids) == 1:
self.pseudo_id_cache[json_id] = ids.pop()
errmsg = None
elif not ids:
errmsg = 'cannot resolve pseudo id to {}: {}'.format(
self.model_class.__name__, json_id)
else:
errmsg = 'multiple objects returned for {} pseudo id {}: {}'.format(
self.model_class.__name__, json_id, ids)
# either raise or log error
if errmsg:
if not allow_no_match:
raise UnresolvedIdError(errmsg)
else:
self.error(errmsg)
self.pseudo_id_cache[json_id] = None
# return the cached object
return self.pseudo_id_cache[json_id]
# get the id that the duplicate points to, or use self
json_id = self.duplicates.get(json_id, json_id)
try:
return self.json_to_db_id[json_id]
except KeyError:
raise UnresolvedIdError('cannot resolve id: {}'.format(json_id))
def import_directory(self, datadir):
""" import a JSON directory into the database """
def json_stream():
# load all json, mapped by json_id
for fname in glob.glob(os.path.join(datadir, self._type + '_*.json')):
with open(fname) as f:
yield json.load(f)
return self.import_data(json_stream())
def _prepare_imports(self, dicts):
""" filters the import stream to remove duplicates
also serves as a good place to override if anything special has to be done to the
order of the import stream (see OrganizationImporter)
"""
# hash(json): id
seen_hashes = {}
for data in dicts:
json_id = data.pop('_id')
# map duplicates (using omnihash to tell if json dicts are identical-ish)
objhash = omnihash(data)
if objhash not in seen_hashes:
seen_hashes[objhash] = json_id
yield json_id, data
else:
self.duplicates[json_id] = seen_hashes[objhash]
def import_data(self, data_items):
""" import a bunch of dicts together """
# keep counts of all actions
record = {
'insert': 0, 'update': 0, 'noop': 0,
'start': utcnow(),
'records': {
'insert': [],
'update': [],
'noop': [],
}
}
for json_id, data in self._prepare_imports(data_items):
obj_id, what = self.import_item(data)
self.json_to_db_id[json_id] = obj_id
record['records'][what].append(obj_id)
record[what] += 1
# all objects are loaded, a perfect time to do inter-object resolution and other tasks
self.postimport()
record['end'] = utcnow()
return {self._type: record}
def import_item(self, data):
""" function used by import_data """
what = 'noop'
# remove the JSON _id (may still be there if called directly)
data.pop('_id', None)
# add fields/etc.
data = self.apply_transformers(data)
data = self.prepare_for_db(data)
try:
obj = self.get_object(data)
except self.model_class.DoesNotExist:
obj = None
# remove pupa_id which does not belong in the OCD data models
pupa_id = data.pop('pupa_id', None)
# pull related fields off
related = {}
for field in self.related_models:
related[field] = data.pop(field)
# obj existed, check if we need to do an update
if obj:
if obj.id in self.json_to_db_id.values():
raise DuplicateItemError(data, obj, related.get('sources', []))
# check base object for changes
for key, value in data.items():
if getattr(obj, key) != value and key not in obj.locked_fields:
setattr(obj, key, value)
what = 'update'
updated = self._update_related(obj, related, self.related_models)
if updated:
what = 'update'
if what == 'update':
obj.save()
# need to create the data
else:
what = 'insert'
try:
obj = self.model_class.objects.create(**data)
except Exception as e:
raise DataImportError('{} while importing {} as {}'.format(e, data,
self.model_class))
self._create_related(obj, related, self.related_models)
if pupa_id:
Identifier.objects.get_or_create(identifier=pupa_id,
jurisdiction_id=self.jurisdiction_id,
defaults={'content_object': obj})
return obj.id, what
def _create_related(self, obj, related, subfield_dict):
"""
create DB objects related to a base object
obj: a base object to create related
related: dict mapping field names to lists of related objects
subfield_list: where to get the next layer of subfields
"""
for field, items in related.items():
subobjects = []
all_subrelated = []
Subtype, reverse_id_field, subsubdict = subfield_dict[field]
for order, item in enumerate(items):
# pull off 'subrelated' (things that are related to this obj)
subrelated = {}
for subfield in subsubdict:
subrelated[subfield] = item.pop(subfield)
if field in self.preserve_order:
item['order'] = order
item[reverse_id_field] = obj.id
try:
subobjects.append(Subtype(**item))
all_subrelated.append(subrelated)
except Exception as e:
raise DataImportError('{} while importing {} as {}'.format(e, item, Subtype))
# add all subobjects at once (really great for actions & votes)
try:
Subtype.objects.bulk_create(subobjects)
except Exception as e:
raise DataImportError('{} while importing {} as {}'.format(e, subobjects, Subtype))
# after import the subobjects, import their subsubobjects
for subobj, subrel in zip(subobjects, all_subrelated):
self._create_related(subobj, subrel, subsubdict)
def lookup_obj_id(self, pupa_id, model):
content_type = ContentType.objects.get_for_model(model)
try:
obj_id = Identifier.objects.get(identifier=pupa_id,
content_type=content_type,
jurisdiction_id=self.jurisdiction_id).object_id
except Identifier.DoesNotExist:
obj_id = None
return obj_id
def apply_transformers(self, data, transformers=None):
if transformers is None:
transformers = self.cached_transformers
for key, key_transformers in transformers.items():
if key not in data:
continue
if isinstance(key_transformers, list):
for transformer in key_transformers:
data[key] = transformer(data[key])
elif isinstance(key_transformers, dict):
self.apply_transformers(data[key], key_transformers)
else:
data[key] = key_transformers(data[key])
return data
def get_seen_sessions(self):
return self.session_cache.values()
|
opencivicdata/pupa | pupa/importers/base.py | BaseImporter._create_related | python | def _create_related(self, obj, related, subfield_dict):
for field, items in related.items():
subobjects = []
all_subrelated = []
Subtype, reverse_id_field, subsubdict = subfield_dict[field]
for order, item in enumerate(items):
# pull off 'subrelated' (things that are related to this obj)
subrelated = {}
for subfield in subsubdict:
subrelated[subfield] = item.pop(subfield)
if field in self.preserve_order:
item['order'] = order
item[reverse_id_field] = obj.id
try:
subobjects.append(Subtype(**item))
all_subrelated.append(subrelated)
except Exception as e:
raise DataImportError('{} while importing {} as {}'.format(e, item, Subtype))
# add all subobjects at once (really great for actions & votes)
try:
Subtype.objects.bulk_create(subobjects)
except Exception as e:
raise DataImportError('{} while importing {} as {}'.format(e, subobjects, Subtype))
# after import the subobjects, import their subsubobjects
for subobj, subrel in zip(subobjects, all_subrelated):
self._create_related(subobj, subrel, subsubdict) | create DB objects related to a base object
obj: a base object to create related
related: dict mapping field names to lists of related objects
subfield_list: where to get the next layer of subfields | train | https://github.com/opencivicdata/pupa/blob/18e0ddc4344804987ee0f2227bf600375538dbd5/pupa/importers/base.py#L371-L407 | null | class BaseImporter(object):
""" BaseImporter
Override:
get_object(data)
limit_spec(spec) [optional, required if pseudo_ids are used]
prepare_for_db(data) [optional]
postimport() [optional]
"""
_type = None
model_class = None
related_models = {}
preserve_order = set()
merge_related = {}
cached_transformers = {}
def __init__(self, jurisdiction_id):
self.jurisdiction_id = jurisdiction_id
self.json_to_db_id = {}
self.duplicates = {}
self.pseudo_id_cache = {}
self.session_cache = {}
self.logger = logging.getLogger("pupa")
self.info = self.logger.info
self.debug = self.logger.debug
self.warning = self.logger.warning
self.error = self.logger.error
self.critical = self.logger.critical
# load transformers from appropriate setting
if settings.IMPORT_TRANSFORMERS.get(self._type):
self.cached_transformers = settings.IMPORT_TRANSFORMERS[self._type]
def get_session_id(self, identifier):
if identifier not in self.session_cache:
self.session_cache[identifier] = LegislativeSession.objects.get(
identifier=identifier, jurisdiction_id=self.jurisdiction_id).id
return self.session_cache[identifier]
# no-ops to be overriden
def prepare_for_db(self, data):
return data
def postimport(self):
pass
def resolve_json_id(self, json_id, allow_no_match=False):
"""
Given an id found in scraped JSON, return a DB id for the object.
params:
json_id: id from json
allow_no_match: just return None if id can't be resolved
returns:
database id
raises:
ValueError if id couldn't be resolved
"""
if not json_id:
return None
if json_id.startswith('~'):
# keep caches of all the pseudo-ids to avoid doing 1000s of lookups during import
if json_id not in self.pseudo_id_cache:
spec = get_pseudo_id(json_id)
spec = self.limit_spec(spec)
if isinstance(spec, Q):
objects = self.model_class.objects.filter(spec)
else:
objects = self.model_class.objects.filter(**spec)
ids = {each.id for each in objects}
if len(ids) == 1:
self.pseudo_id_cache[json_id] = ids.pop()
errmsg = None
elif not ids:
errmsg = 'cannot resolve pseudo id to {}: {}'.format(
self.model_class.__name__, json_id)
else:
errmsg = 'multiple objects returned for {} pseudo id {}: {}'.format(
self.model_class.__name__, json_id, ids)
# either raise or log error
if errmsg:
if not allow_no_match:
raise UnresolvedIdError(errmsg)
else:
self.error(errmsg)
self.pseudo_id_cache[json_id] = None
# return the cached object
return self.pseudo_id_cache[json_id]
# get the id that the duplicate points to, or use self
json_id = self.duplicates.get(json_id, json_id)
try:
return self.json_to_db_id[json_id]
except KeyError:
raise UnresolvedIdError('cannot resolve id: {}'.format(json_id))
def import_directory(self, datadir):
""" import a JSON directory into the database """
def json_stream():
# load all json, mapped by json_id
for fname in glob.glob(os.path.join(datadir, self._type + '_*.json')):
with open(fname) as f:
yield json.load(f)
return self.import_data(json_stream())
def _prepare_imports(self, dicts):
""" filters the import stream to remove duplicates
also serves as a good place to override if anything special has to be done to the
order of the import stream (see OrganizationImporter)
"""
# hash(json): id
seen_hashes = {}
for data in dicts:
json_id = data.pop('_id')
# map duplicates (using omnihash to tell if json dicts are identical-ish)
objhash = omnihash(data)
if objhash not in seen_hashes:
seen_hashes[objhash] = json_id
yield json_id, data
else:
self.duplicates[json_id] = seen_hashes[objhash]
def import_data(self, data_items):
""" import a bunch of dicts together """
# keep counts of all actions
record = {
'insert': 0, 'update': 0, 'noop': 0,
'start': utcnow(),
'records': {
'insert': [],
'update': [],
'noop': [],
}
}
for json_id, data in self._prepare_imports(data_items):
obj_id, what = self.import_item(data)
self.json_to_db_id[json_id] = obj_id
record['records'][what].append(obj_id)
record[what] += 1
# all objects are loaded, a perfect time to do inter-object resolution and other tasks
self.postimport()
record['end'] = utcnow()
return {self._type: record}
def import_item(self, data):
""" function used by import_data """
what = 'noop'
# remove the JSON _id (may still be there if called directly)
data.pop('_id', None)
# add fields/etc.
data = self.apply_transformers(data)
data = self.prepare_for_db(data)
try:
obj = self.get_object(data)
except self.model_class.DoesNotExist:
obj = None
# remove pupa_id which does not belong in the OCD data models
pupa_id = data.pop('pupa_id', None)
# pull related fields off
related = {}
for field in self.related_models:
related[field] = data.pop(field)
# obj existed, check if we need to do an update
if obj:
if obj.id in self.json_to_db_id.values():
raise DuplicateItemError(data, obj, related.get('sources', []))
# check base object for changes
for key, value in data.items():
if getattr(obj, key) != value and key not in obj.locked_fields:
setattr(obj, key, value)
what = 'update'
updated = self._update_related(obj, related, self.related_models)
if updated:
what = 'update'
if what == 'update':
obj.save()
# need to create the data
else:
what = 'insert'
try:
obj = self.model_class.objects.create(**data)
except Exception as e:
raise DataImportError('{} while importing {} as {}'.format(e, data,
self.model_class))
self._create_related(obj, related, self.related_models)
if pupa_id:
Identifier.objects.get_or_create(identifier=pupa_id,
jurisdiction_id=self.jurisdiction_id,
defaults={'content_object': obj})
return obj.id, what
def _update_related(self, obj, related, subfield_dict):
    """
    Update DB objects related to a base object.

        obj:            a base object whose related objects may change
        related:        dict mapping field names to lists of related objects
        subfield_dict:  where to get the next layer of subfields

    Returns True when anything related was changed (delete/recreate path);
    in-place merges via ``merge_related`` do not flip the flag.
    """
    # keep track of whether or not anything was updated
    updated = False

    # for each related field - check if there are differences
    for field, items in related.items():
        # skip subitem check if it's locked anyway
        if field in obj.locked_fields:
            continue

        # get items from database
        dbitems = list(getattr(obj, field).all())
        dbitems_count = len(dbitems)

        # default to doing nothing
        do_delete = do_update = False

        if items and dbitems_count:          # we have items, so does db, check for conflict
            do_delete = do_update = items_differ(items, dbitems, subfield_dict[field][2])
        elif items and not dbitems_count:    # we have items, db doesn't, just update
            do_update = True
        elif not items and dbitems_count:    # db has items, we don't, just delete
            do_delete = True
        # otherwise: no items or dbitems, so nothing is done

        # don't delete if field is in merge_related
        if field in self.merge_related:
            new_items = []

            # build a mapping of keyfield-tuples to existing database objects
            keylist = self.merge_related[field]
            keyed_dbitems = {tuple(getattr(item, k) for k in keylist): item
                             for item in dbitems}

            # go through 'new' items
            #   if item with the same keyfields exists:
            #       update the database item w/ the new item's properties
            #   else:
            #       add it to new_items
            for item in items:
                key = tuple(item.get(k) for k in keylist)
                dbitem = keyed_dbitems.get(key)
                if not dbitem:
                    new_items.append(item)
                else:
                    # update dbitem in place
                    for fname, val in item.items():
                        setattr(dbitem, fname, val)
                    dbitem.save()

            # import anything that made it to new_items in the usual fashion
            self._create_related(obj, {field: new_items}, subfield_dict)
        else:
            # default logic is to just wipe and recreate subobjects
            if do_delete:
                updated = True
                getattr(obj, field).all().delete()
            if do_update:
                updated = True
                self._create_related(obj, {field: items}, subfield_dict)

    return updated
def lookup_obj_id(self, pupa_id, model):
    """Resolve a scraper-assigned ``pupa_id`` to a database object id.

    Looks the id up among Identifier records for this jurisdiction and the
    given model; returns None when no mapping exists.
    """
    content_type = ContentType.objects.get_for_model(model)
    try:
        obj_id = Identifier.objects.get(identifier=pupa_id,
                                        content_type=content_type,
                                        jurisdiction_id=self.jurisdiction_id).object_id
    except Identifier.DoesNotExist:
        obj_id = None
    return obj_id
def apply_transformers(self, data, transformers=None):
    """Apply field transformers to ``data`` in place and return it.

    ``transformers`` maps a key to either a single callable, a list of
    callables (applied in order), or a nested dict of transformers for a
    sub-dict.  Keys absent from ``data`` are skipped.  Defaults to
    ``self.cached_transformers``.
    """
    if transformers is None:
        transformers = self.cached_transformers

    for key, key_transformers in transformers.items():
        if key not in data:
            continue
        if isinstance(key_transformers, list):
            # apply each transformer in sequence
            for transformer in key_transformers:
                data[key] = transformer(data[key])
        elif isinstance(key_transformers, dict):
            # recurse into nested sub-dict
            self.apply_transformers(data[key], key_transformers)
        else:
            data[key] = key_transformers(data[key])

    return data
def get_seen_sessions(self):
    """Return the session objects seen so far (values of the session cache)."""
    return self.session_cache.values()
|
def add_edge(self, fro, to):
    """
    When doing topological sorting, the semantics of the edge mean that
    the dependency runs from the parent to the child - which is to say
    that the parent is required to be sorted *before* the child.

        [ FROM ] ------> [ TO ]

        Committee on Finance -> Subcommittee of the Finance Committee on Budget
                             -> Subcommittee of the Finance Committee on Roads
    """
    # ensure both endpoints exist as nodes before linking them
    self.add_node(fro)
    self.add_node(to)
    self.edges[fro].add(to)
"def add_node(self, node):\n \"\"\" Add a node to the graph (with no edges) \"\"\"\n self.nodes.add(node)\n"
] | class Network(object):
"""
This object (the `Network` object) handles keeping track of all the
graph's nodes, and links between the nodes.
The `Network' object is mostly used to topologically sort the nodes,
to handle dependency resolution.
"""
def __init__(self):
self.nodes = set()
self.edges = defaultdict(set)
def add_node(self, node):
""" Add a node to the graph (with no edges) """
self.nodes.add(node)
def leaf_nodes(self):
"""
Return an iterable of nodes with no edges pointing at them. This is
helpful to find all nodes without dependencies.
"""
# Now contains all nodes that contain dependencies.
deps = {item for sublist in self.edges.values() for item in sublist}
# contains all nodes *without* any dependencies (leaf nodes)
return self.nodes - deps
def prune_node(self, node, remove_backrefs=False):
"""
remove node `node` from the network (including any edges that may
have been pointing at `node`).
"""
if not remove_backrefs:
for fro, connections in self.edges.items():
if node in self.edges[fro]:
raise ValueError("""Attempting to remove a node with
backrefs. You may consider setting
`remove_backrefs` to true.""")
# OK. Otherwise, let's do our removal.
self.nodes.remove(node)
if node in self.edges:
# Remove add edges from this node if we're pruning it.
self.edges.pop(node)
for fro, connections in self.edges.items():
# Remove any links to this node (if they exist)
if node in self.edges[fro]:
# If we should remove backrefs:
self.edges[fro].remove(node)
def sort(self):
"""
Return an iterable of nodes, topologically sorted to correctly import
dependencies before leaf nodes.
"""
while self.nodes:
iterated = False
for node in self.leaf_nodes():
iterated = True
self.prune_node(node)
yield node
if not iterated:
raise CyclicGraphError("Sorting has found a cyclic graph.")
def dot(self):
"""
Return a buffer that represents something dot(1) can render.
"""
buff = "digraph graphname {"
for fro in self.edges:
for to in self.edges[fro]:
buff += "%s -> %s;" % (fro, to)
buff += "}"
return buff
def cycles(self):
"""
Fairly expensive cycle detection algorithm. This method
will return the shortest unique cycles that were detected.
Debug usage may look something like:
print("The following cycles were found:")
for cycle in network.cycles():
print(" ", " -> ".join(cycle))
"""
def walk_node(node, seen):
"""
Walk each top-level node we know about, and recurse
along the graph.
"""
if node in seen:
yield (node,)
return
seen.add(node)
for edge in self.edges[node]:
for cycle in walk_node(edge, set(seen)):
yield (node,) + cycle
# First, let's get a iterable of all known cycles.
cycles = chain.from_iterable(
(walk_node(node, set()) for node in self.nodes))
shortest = set()
# Now, let's go through and sift through the cycles, finding
# the shortest unique cycle known, ignoring cycles which contain
# already known cycles.
for cycle in sorted(cycles, key=len):
for el in shortest:
if set(el).issubset(set(cycle)):
break
else:
shortest.add(cycle)
# And return that unique list.
return shortest
|
def leaf_nodes(self):
    """
    Return an iterable of nodes with no edges pointing at them.  This is
    helpful to find all nodes without dependencies.
    """
    # deps contains every node that something depends on
    deps = {item for sublist in self.edges.values() for item in sublist}
    # everything else has no inbound edge (leaf nodes)
    return self.nodes - deps
"""
This object (the `Network` object) handles keeping track of all the
graph's nodes, and links between the nodes.
The `Network' object is mostly used to topologically sort the nodes,
to handle dependency resolution.
"""
def __init__(self):
self.nodes = set()
self.edges = defaultdict(set)
def add_node(self, node):
""" Add a node to the graph (with no edges) """
self.nodes.add(node)
def add_edge(self, fro, to):
"""
When doing topological sorting, the semantics of the edge mean that
the dependency runs from the parent to the child - which is to say that
the parent is required to be sorted *before* the child.
[ FROM ] ------> [ TO ]
Committee on Finance -> Subcommittee of the Finance Committee on Budget
-> Subcommittee of the Finance Committee on Roads
"""
self.add_node(fro)
self.add_node(to)
self.edges[fro].add(to)
def prune_node(self, node, remove_backrefs=False):
"""
remove node `node` from the network (including any edges that may
have been pointing at `node`).
"""
if not remove_backrefs:
for fro, connections in self.edges.items():
if node in self.edges[fro]:
raise ValueError("""Attempting to remove a node with
backrefs. You may consider setting
`remove_backrefs` to true.""")
# OK. Otherwise, let's do our removal.
self.nodes.remove(node)
if node in self.edges:
# Remove add edges from this node if we're pruning it.
self.edges.pop(node)
for fro, connections in self.edges.items():
# Remove any links to this node (if they exist)
if node in self.edges[fro]:
# If we should remove backrefs:
self.edges[fro].remove(node)
def sort(self):
"""
Return an iterable of nodes, topologically sorted to correctly import
dependencies before leaf nodes.
"""
while self.nodes:
iterated = False
for node in self.leaf_nodes():
iterated = True
self.prune_node(node)
yield node
if not iterated:
raise CyclicGraphError("Sorting has found a cyclic graph.")
def dot(self):
"""
Return a buffer that represents something dot(1) can render.
"""
buff = "digraph graphname {"
for fro in self.edges:
for to in self.edges[fro]:
buff += "%s -> %s;" % (fro, to)
buff += "}"
return buff
def cycles(self):
"""
Fairly expensive cycle detection algorithm. This method
will return the shortest unique cycles that were detected.
Debug usage may look something like:
print("The following cycles were found:")
for cycle in network.cycles():
print(" ", " -> ".join(cycle))
"""
def walk_node(node, seen):
"""
Walk each top-level node we know about, and recurse
along the graph.
"""
if node in seen:
yield (node,)
return
seen.add(node)
for edge in self.edges[node]:
for cycle in walk_node(edge, set(seen)):
yield (node,) + cycle
# First, let's get a iterable of all known cycles.
cycles = chain.from_iterable(
(walk_node(node, set()) for node in self.nodes))
shortest = set()
# Now, let's go through and sift through the cycles, finding
# the shortest unique cycle known, ignoring cycles which contain
# already known cycles.
for cycle in sorted(cycles, key=len):
for el in shortest:
if set(el).issubset(set(cycle)):
break
else:
shortest.add(cycle)
# And return that unique list.
return shortest
|
def prune_node(self, node, remove_backrefs=False):
    """
    Remove node `node` from the network (including any edges that may
    have been pointing at `node`).

    Raises ValueError when `node` still has inbound edges and
    ``remove_backrefs`` is False.
    """
    if not remove_backrefs:
        for fro, connections in self.edges.items():
            if node in self.edges[fro]:
                raise ValueError("""Attempting to remove a node with
                                 backrefs. You may consider setting
                                 `remove_backrefs` to true.""")
    # OK. Otherwise, let's do our removal.
    self.nodes.remove(node)
    if node in self.edges:
        # Remove outbound edges from this node if we're pruning it.
        self.edges.pop(node)
    for fro, connections in self.edges.items():
        # Remove any links to this node (if they exist)
        if node in self.edges[fro]:
            # If we should remove backrefs:
            self.edges[fro].remove(node)
"""
This object (the `Network` object) handles keeping track of all the
graph's nodes, and links between the nodes.
The `Network' object is mostly used to topologically sort the nodes,
to handle dependency resolution.
"""
def __init__(self):
self.nodes = set()
self.edges = defaultdict(set)
def add_node(self, node):
""" Add a node to the graph (with no edges) """
self.nodes.add(node)
def add_edge(self, fro, to):
"""
When doing topological sorting, the semantics of the edge mean that
the dependency runs from the parent to the child - which is to say that
the parent is required to be sorted *before* the child.
[ FROM ] ------> [ TO ]
Committee on Finance -> Subcommittee of the Finance Committee on Budget
-> Subcommittee of the Finance Committee on Roads
"""
self.add_node(fro)
self.add_node(to)
self.edges[fro].add(to)
def leaf_nodes(self):
"""
Return an iterable of nodes with no edges pointing at them. This is
helpful to find all nodes without dependencies.
"""
# Now contains all nodes that contain dependencies.
deps = {item for sublist in self.edges.values() for item in sublist}
# contains all nodes *without* any dependencies (leaf nodes)
return self.nodes - deps
def sort(self):
"""
Return an iterable of nodes, topologically sorted to correctly import
dependencies before leaf nodes.
"""
while self.nodes:
iterated = False
for node in self.leaf_nodes():
iterated = True
self.prune_node(node)
yield node
if not iterated:
raise CyclicGraphError("Sorting has found a cyclic graph.")
def dot(self):
"""
Return a buffer that represents something dot(1) can render.
"""
buff = "digraph graphname {"
for fro in self.edges:
for to in self.edges[fro]:
buff += "%s -> %s;" % (fro, to)
buff += "}"
return buff
def cycles(self):
"""
Fairly expensive cycle detection algorithm. This method
will return the shortest unique cycles that were detected.
Debug usage may look something like:
print("The following cycles were found:")
for cycle in network.cycles():
print(" ", " -> ".join(cycle))
"""
def walk_node(node, seen):
"""
Walk each top-level node we know about, and recurse
along the graph.
"""
if node in seen:
yield (node,)
return
seen.add(node)
for edge in self.edges[node]:
for cycle in walk_node(edge, set(seen)):
yield (node,) + cycle
# First, let's get a iterable of all known cycles.
cycles = chain.from_iterable(
(walk_node(node, set()) for node in self.nodes))
shortest = set()
# Now, let's go through and sift through the cycles, finding
# the shortest unique cycle known, ignoring cycles which contain
# already known cycles.
for cycle in sorted(cycles, key=len):
for el in shortest:
if set(el).issubset(set(cycle)):
break
else:
shortest.add(cycle)
# And return that unique list.
return shortest
|
def sort(self):
    """
    Return an iterable of nodes, topologically sorted to correctly import
    dependencies before leaf nodes.

    Raises CyclicGraphError when no progress can be made (a cycle exists).
    """
    while self.nodes:
        iterated = False
        for node in self.leaf_nodes():
            iterated = True
            self.prune_node(node)
            yield node
        if not iterated:
            # a full pass produced no leaf nodes -> the graph has a cycle
            raise CyclicGraphError("Sorting has found a cyclic graph.")
"def leaf_nodes(self):\n \"\"\"\n Return an interable of nodes with no edges pointing at them. This is\n helpful to find all nodes without dependencies.\n \"\"\"\n # Now contains all nodes that contain dependencies.\n deps = {item for sublist in self.edges.values() for item in sublist}\n # cont... | class Network(object):
"""
This object (the `Network` object) handles keeping track of all the
graph's nodes, and links between the nodes.
The `Network' object is mostly used to topologically sort the nodes,
to handle dependency resolution.
"""
def __init__(self):
self.nodes = set()
self.edges = defaultdict(set)
def add_node(self, node):
""" Add a node to the graph (with no edges) """
self.nodes.add(node)
def add_edge(self, fro, to):
"""
When doing topological sorting, the semantics of the edge mean that
the dependency runs from the parent to the child - which is to say that
the parent is required to be sorted *before* the child.
[ FROM ] ------> [ TO ]
Committee on Finance -> Subcommittee of the Finance Committee on Budget
-> Subcommittee of the Finance Committee on Roads
"""
self.add_node(fro)
self.add_node(to)
self.edges[fro].add(to)
def leaf_nodes(self):
"""
Return an iterable of nodes with no edges pointing at them. This is
helpful to find all nodes without dependencies.
"""
# Now contains all nodes that contain dependencies.
deps = {item for sublist in self.edges.values() for item in sublist}
# contains all nodes *without* any dependencies (leaf nodes)
return self.nodes - deps
def prune_node(self, node, remove_backrefs=False):
"""
remove node `node` from the network (including any edges that may
have been pointing at `node`).
"""
if not remove_backrefs:
for fro, connections in self.edges.items():
if node in self.edges[fro]:
raise ValueError("""Attempting to remove a node with
backrefs. You may consider setting
`remove_backrefs` to true.""")
# OK. Otherwise, let's do our removal.
self.nodes.remove(node)
if node in self.edges:
# Remove add edges from this node if we're pruning it.
self.edges.pop(node)
for fro, connections in self.edges.items():
# Remove any links to this node (if they exist)
if node in self.edges[fro]:
# If we should remove backrefs:
self.edges[fro].remove(node)
def dot(self):
"""
Return a buffer that represents something dot(1) can render.
"""
buff = "digraph graphname {"
for fro in self.edges:
for to in self.edges[fro]:
buff += "%s -> %s;" % (fro, to)
buff += "}"
return buff
def cycles(self):
"""
Fairly expensive cycle detection algorithm. This method
will return the shortest unique cycles that were detected.
Debug usage may look something like:
print("The following cycles were found:")
for cycle in network.cycles():
print(" ", " -> ".join(cycle))
"""
def walk_node(node, seen):
"""
Walk each top-level node we know about, and recurse
along the graph.
"""
if node in seen:
yield (node,)
return
seen.add(node)
for edge in self.edges[node]:
for cycle in walk_node(edge, set(seen)):
yield (node,) + cycle
# First, let's get a iterable of all known cycles.
cycles = chain.from_iterable(
(walk_node(node, set()) for node in self.nodes))
shortest = set()
# Now, let's go through and sift through the cycles, finding
# the shortest unique cycle known, ignoring cycles which contain
# already known cycles.
for cycle in sorted(cycles, key=len):
for el in shortest:
if set(el).issubset(set(cycle)):
break
else:
shortest.add(cycle)
# And return that unique list.
return shortest
|
def dot(self):
    """
    Return a buffer that represents something dot(1) can render.
    """
    buff = "digraph graphname {"
    for fro in self.edges:
        for to in self.edges[fro]:
            buff += "%s -> %s;" % (fro, to)
    buff += "}"
    return buff
"""
This object (the `Network` object) handles keeping track of all the
graph's nodes, and links between the nodes.
The `Network' object is mostly used to topologically sort the nodes,
to handle dependency resolution.
"""
def __init__(self):
self.nodes = set()
self.edges = defaultdict(set)
def add_node(self, node):
""" Add a node to the graph (with no edges) """
self.nodes.add(node)
def add_edge(self, fro, to):
"""
When doing topological sorting, the semantics of the edge mean that
the dependency runs from the parent to the child - which is to say that
the parent is required to be sorted *before* the child.
[ FROM ] ------> [ TO ]
Committee on Finance -> Subcommittee of the Finance Committee on Budget
-> Subcommittee of the Finance Committee on Roads
"""
self.add_node(fro)
self.add_node(to)
self.edges[fro].add(to)
def leaf_nodes(self):
"""
Return an iterable of nodes with no edges pointing at them. This is
helpful to find all nodes without dependencies.
"""
# Now contains all nodes that contain dependencies.
deps = {item for sublist in self.edges.values() for item in sublist}
# contains all nodes *without* any dependencies (leaf nodes)
return self.nodes - deps
def prune_node(self, node, remove_backrefs=False):
"""
remove node `node` from the network (including any edges that may
have been pointing at `node`).
"""
if not remove_backrefs:
for fro, connections in self.edges.items():
if node in self.edges[fro]:
raise ValueError("""Attempting to remove a node with
backrefs. You may consider setting
`remove_backrefs` to true.""")
# OK. Otherwise, let's do our removal.
self.nodes.remove(node)
if node in self.edges:
# Remove add edges from this node if we're pruning it.
self.edges.pop(node)
for fro, connections in self.edges.items():
# Remove any links to this node (if they exist)
if node in self.edges[fro]:
# If we should remove backrefs:
self.edges[fro].remove(node)
def sort(self):
"""
Return an iterable of nodes, topologically sorted to correctly import
dependencies before leaf nodes.
"""
while self.nodes:
iterated = False
for node in self.leaf_nodes():
iterated = True
self.prune_node(node)
yield node
if not iterated:
raise CyclicGraphError("Sorting has found a cyclic graph.")
def cycles(self):
"""
Fairly expensive cycle detection algorithm. This method
will return the shortest unique cycles that were detected.
Debug usage may look something like:
print("The following cycles were found:")
for cycle in network.cycles():
print(" ", " -> ".join(cycle))
"""
def walk_node(node, seen):
"""
Walk each top-level node we know about, and recurse
along the graph.
"""
if node in seen:
yield (node,)
return
seen.add(node)
for edge in self.edges[node]:
for cycle in walk_node(edge, set(seen)):
yield (node,) + cycle
# First, let's get a iterable of all known cycles.
cycles = chain.from_iterable(
(walk_node(node, set()) for node in self.nodes))
shortest = set()
# Now, let's go through and sift through the cycles, finding
# the shortest unique cycle known, ignoring cycles which contain
# already known cycles.
for cycle in sorted(cycles, key=len):
for el in shortest:
if set(el).issubset(set(cycle)):
break
else:
shortest.add(cycle)
# And return that unique list.
return shortest
|
def cycles(self):
    """
    Fairly expensive cycle detection algorithm.  This method
    will return the shortest unique cycles that were detected.

    Debug usage may look something like:

        print("The following cycles were found:")
        for cycle in network.cycles():
            print("    ", " -> ".join(cycle))
    """
    def walk_node(node, seen):
        """
        Walk each top-level node we know about, and recurse
        along the graph.
        """
        if node in seen:
            yield (node,)
            return
        seen.add(node)
        for edge in self.edges[node]:
            # each recursion gets its own copy of `seen` so sibling
            # branches do not interfere with each other
            for cycle in walk_node(edge, set(seen)):
                yield (node,) + cycle

    # First, let's get an iterable of all known cycles.
    cycles = chain.from_iterable(
        (walk_node(node, set()) for node in self.nodes))

    shortest = set()
    # Now, let's go through and sift through the cycles, finding
    # the shortest unique cycle known, ignoring cycles which contain
    # already known cycles.
    for cycle in sorted(cycles, key=len):
        for el in shortest:
            if set(el).issubset(set(cycle)):
                break
        else:
            shortest.add(cycle)

    # And return that unique list.
    return shortest
"""
This object (the `Network` object) handles keeping track of all the
graph's nodes, and links between the nodes.
The `Network' object is mostly used to topologically sort the nodes,
to handle dependency resolution.
"""
def __init__(self):
self.nodes = set()
self.edges = defaultdict(set)
def add_node(self, node):
""" Add a node to the graph (with no edges) """
self.nodes.add(node)
def add_edge(self, fro, to):
"""
When doing topological sorting, the semantics of the edge mean that
the depedency runs from the parent to the child - which is to say that
the parent is required to be sorted *before* the child.
[ FROM ] ------> [ TO ]
Committee on Finance -> Subcommittee of the Finance Committee on Budget
-> Subcommittee of the Finance Committee on Roads
"""
self.add_node(fro)
self.add_node(to)
self.edges[fro].add(to)
def leaf_nodes(self):
"""
Return an iterable of nodes with no edges pointing at them. This is
helpful to find all nodes without dependencies.
"""
# Now contains all nodes that contain dependencies.
deps = {item for sublist in self.edges.values() for item in sublist}
# contains all nodes *without* any dependencies (leaf nodes)
return self.nodes - deps
def prune_node(self, node, remove_backrefs=False):
"""
remove node `node` from the network (including any edges that may
have been pointing at `node`).
"""
if not remove_backrefs:
for fro, connections in self.edges.items():
if node in self.edges[fro]:
raise ValueError("""Attempting to remove a node with
backrefs. You may consider setting
`remove_backrefs` to true.""")
# OK. Otherwise, let's do our removal.
self.nodes.remove(node)
if node in self.edges:
# Remove add edges from this node if we're pruning it.
self.edges.pop(node)
for fro, connections in self.edges.items():
# Remove any links to this node (if they exist)
if node in self.edges[fro]:
# If we should remove backrefs:
self.edges[fro].remove(node)
def sort(self):
"""
Return an iterable of nodes, topologically sorted to correctly import
dependencies before leaf nodes.
"""
while self.nodes:
iterated = False
for node in self.leaf_nodes():
iterated = True
self.prune_node(node)
yield node
if not iterated:
raise CyclicGraphError("Sorting has found a cyclic graph.")
def dot(self):
"""
Return a buffer that represents something dot(1) can render.
"""
buff = "digraph graphname {"
for fro in self.edges:
for to in self.edges[fro]:
buff += "%s -> %s;" % (fro, to)
buff += "}"
return buff
|
def pseudo_organization(organization, classification, default=None):
    """Helper for setting an appropriate ID for organizations.

    Exactly one of ``organization`` / ``classification`` may be supplied;
    giving both raises ScrapeValueError.  An Organization instance yields
    its real id, a string is returned as-is (already an id or pseudo-id),
    and any other mapping becomes a pseudo-id.  Falls back to a pseudo-id
    built from ``default`` (when given), else None.
    """
    if organization and classification:
        raise ScrapeValueError('cannot specify both classification and organization')
    elif classification:
        return _make_pseudo_id(classification=classification)
    elif organization:
        if isinstance(organization, Organization):
            return organization._id
        elif isinstance(organization, str):
            return organization
        else:
            return _make_pseudo_id(**organization)
    elif default is not None:
        return _make_pseudo_id(classification=default)
    else:
        return None
"def _make_pseudo_id(**kwargs):\n \"\"\" pseudo ids are just JSON \"\"\"\n # ensure keys are sorted so that these are deterministic\n return '~' + json.dumps(kwargs, sort_keys=True)\n"
] | import copy
from .base import (BaseModel, SourceMixin, LinkMixin, ContactDetailMixin, OtherNameMixin,
IdentifierMixin)
from .schemas.post import schema as post_schema
from .schemas.person import schema as person_schema
from .schemas.membership import schema as membership_schema
from .schemas.organization import schema as org_schema
from ..utils import _make_pseudo_id
from pupa.exceptions import ScrapeValueError
# a copy of the org schema without sources
org_schema_no_sources = copy.deepcopy(org_schema)
org_schema_no_sources['properties'].pop('sources')
class Post(BaseModel, LinkMixin, ContactDetailMixin):
"""
A popolo-style Post
"""
_type = 'post'
_schema = post_schema
def __init__(self, *, label, role, organization_id=None, chamber=None,
division_id=None, start_date='', end_date='',
maximum_memberships=1):
super(Post, self).__init__()
self.label = label
self.role = role
self.organization_id = pseudo_organization(organization_id, chamber)
self.division_id = division_id
self.start_date = start_date
self.end_date = end_date
self.maximum_memberships = maximum_memberships
def __str__(self):
return self.label
class Membership(BaseModel, ContactDetailMixin, LinkMixin):
"""
A popolo-style Membership.
"""
_type = 'membership'
_schema = membership_schema
def __init__(self, *, person_id, organization_id, post_id=None, role='', label='',
start_date='', end_date='', on_behalf_of_id=None,
person_name=''
):
"""
Constructor for the Membership object.
We require a person ID and organization ID, as required by the
popolo spec. Additional arguments may be given, which match those
defined by popolo.
"""
super(Membership, self).__init__()
self.person_id = person_id
self.person_name = person_name
self.organization_id = organization_id
self.post_id = post_id
self.start_date = start_date
self.end_date = end_date
self.role = role
self.label = label
self.on_behalf_of_id = on_behalf_of_id
def __str__(self):
return self.person_id + ' membership in ' + self.organization_id
class Person(BaseModel, SourceMixin, ContactDetailMixin, LinkMixin, IdentifierMixin,
OtherNameMixin):
"""
Details for a Person in Popolo format.
"""
_type = 'person'
_schema = person_schema
def __init__(self, name, *, birth_date='', death_date='', biography='', summary='', image='',
gender='', national_identity='',
# specialty fields
district=None, party=None, primary_org='', role='',
start_date='', end_date='', primary_org_name=None):
super(Person, self).__init__()
self.name = name
self.birth_date = birth_date
self.death_date = death_date
self.biography = biography
self.summary = summary
self.image = image
self.gender = gender
self.national_identity = national_identity
if primary_org:
self.add_term(role, primary_org, district=district,
start_date=start_date, end_date=end_date,
org_name=primary_org_name)
if party:
self.add_party(party)
def add_membership(self, name_or_org, role='member', **kwargs):
"""
add a membership in an organization and return the membership
object in case there are more details to add
"""
if isinstance(name_or_org, Organization):
membership = Membership(person_id=self._id,
person_name=self.name,
organization_id=name_or_org._id,
role=role, **kwargs)
else:
membership = Membership(person_id=self._id,
person_name=self.name,
organization_id=_make_pseudo_id(name=name_or_org),
role=role, **kwargs)
self._related.append(membership)
return membership
def add_party(self, party, **kwargs):
membership = Membership(
person_id=self._id,
person_name=self.name,
organization_id=_make_pseudo_id(classification="party", name=party),
role='member', **kwargs)
self._related.append(membership)
def add_term(self, role, org_classification, *, district=None,
start_date='', end_date='', label='', org_name=None,
appointment=False):
if org_name:
org_id = _make_pseudo_id(classification=org_classification,
name=org_name)
else:
org_id = _make_pseudo_id(classification=org_classification)
if district:
if role:
post_id = _make_pseudo_id(label=district,
role=role,
organization__classification=org_classification)
else:
post_id = _make_pseudo_id(label=district,
organization__classification=org_classification)
elif appointment:
post_id = _make_pseudo_id(role=role,
organization__classification=org_classification)
else:
post_id = None
membership = Membership(person_id=self._id, person_name=self.name,
organization_id=org_id, post_id=post_id,
role=role, start_date=start_date, end_date=end_date, label=label)
self._related.append(membership)
return membership
def __str__(self):
return self.name
class Organization(BaseModel, SourceMixin, ContactDetailMixin, LinkMixin, IdentifierMixin,
OtherNameMixin):
"""
A single popolo-style Organization
"""
_type = 'organization'
_schema = org_schema
def __init__(self, name, *, classification='', parent_id=None,
founding_date='', dissolution_date='', image='',
chamber=None):
"""
Constructor for the Organization object.
"""
super(Organization, self).__init__()
self.name = name
self.classification = classification
self.founding_date = founding_date
self.dissolution_date = dissolution_date
self.parent_id = pseudo_organization(parent_id, chamber)
self.image = image
def __str__(self):
return self.name
def validate(self):
schema = None
# these are implicitly declared & do not require sources
if self.classification in ('party', 'legislature', 'upper', 'lower', 'executive'):
schema = org_schema_no_sources
return super(Organization, self).validate(schema=schema)
def add_post(self, label, role, **kwargs):
post = Post(label=label, role=role, organization_id=self._id, **kwargs)
self._related.append(post)
return post
def add_member(self, name_or_person, role='member', **kwargs):
if isinstance(name_or_person, Person):
membership = Membership(person_id=name_or_person._id,
person_name=name_or_person.name,
organization_id=self._id,
role=role, **kwargs)
else:
membership = Membership(person_id=_make_pseudo_id(name=name_or_person),
person_name=name_or_person,
organization_id=self._id, role=role, **kwargs)
self._related.append(membership)
return membership
|
opencivicdata/pupa | pupa/scrape/popolo.py | Person.add_membership | python | def add_membership(self, name_or_org, role='member', **kwargs):
if isinstance(name_or_org, Organization):
membership = Membership(person_id=self._id,
person_name=self.name,
organization_id=name_or_org._id,
role=role, **kwargs)
else:
membership = Membership(person_id=self._id,
person_name=self.name,
organization_id=_make_pseudo_id(name=name_or_org),
role=role, **kwargs)
self._related.append(membership)
return membership | add a membership in an organization and return the membership
object in case there are more details to add | train | https://github.com/opencivicdata/pupa/blob/18e0ddc4344804987ee0f2227bf600375538dbd5/pupa/scrape/popolo.py#L104-L120 | [
"def _make_pseudo_id(**kwargs):\n \"\"\" pseudo ids are just JSON \"\"\"\n # ensure keys are sorted so that these are deterministic\n return '~' + json.dumps(kwargs, sort_keys=True)\n"
] | class Person(BaseModel, SourceMixin, ContactDetailMixin, LinkMixin, IdentifierMixin,
OtherNameMixin):
"""
Details for a Person in Popolo format.
"""
_type = 'person'
_schema = person_schema
def __init__(self, name, *, birth_date='', death_date='', biography='', summary='', image='',
gender='', national_identity='',
# specialty fields
district=None, party=None, primary_org='', role='',
start_date='', end_date='', primary_org_name=None):
super(Person, self).__init__()
self.name = name
self.birth_date = birth_date
self.death_date = death_date
self.biography = biography
self.summary = summary
self.image = image
self.gender = gender
self.national_identity = national_identity
if primary_org:
self.add_term(role, primary_org, district=district,
start_date=start_date, end_date=end_date,
org_name=primary_org_name)
if party:
self.add_party(party)
def add_party(self, party, **kwargs):
membership = Membership(
person_id=self._id,
person_name=self.name,
organization_id=_make_pseudo_id(classification="party", name=party),
role='member', **kwargs)
self._related.append(membership)
def add_term(self, role, org_classification, *, district=None,
start_date='', end_date='', label='', org_name=None,
appointment=False):
if org_name:
org_id = _make_pseudo_id(classification=org_classification,
name=org_name)
else:
org_id = _make_pseudo_id(classification=org_classification)
if district:
if role:
post_id = _make_pseudo_id(label=district,
role=role,
organization__classification=org_classification)
else:
post_id = _make_pseudo_id(label=district,
organization__classification=org_classification)
elif appointment:
post_id = _make_pseudo_id(role=role,
organization__classification=org_classification)
else:
post_id = None
membership = Membership(person_id=self._id, person_name=self.name,
organization_id=org_id, post_id=post_id,
role=role, start_date=start_date, end_date=end_date, label=label)
self._related.append(membership)
return membership
def __str__(self):
return self.name
|
opencivicdata/pupa | pupa/scrape/base.py | Scraper.save_object | python | def save_object(self, obj):
obj.pre_save(self.jurisdiction.jurisdiction_id)
filename = '{0}_{1}.json'.format(obj._type, obj._id).replace('/', '-')
self.info('save %s %s as %s', obj._type, obj, filename)
self.debug(json.dumps(OrderedDict(sorted(obj.as_dict().items())),
cls=utils.JSONEncoderPlus, indent=4, separators=(',', ': ')))
self.output_names[obj._type].add(filename)
with open(os.path.join(self.datadir, filename), 'w') as f:
json.dump(obj.as_dict(), f, cls=utils.JSONEncoderPlus)
# validate after writing, allows for inspection on failure
try:
obj.validate()
except ValueError as ve:
if self.strict_validation:
raise ve
else:
self.warning(ve)
# after saving and validating, save subordinate objects
for obj in obj._related:
self.save_object(obj) | Save object to disk as JSON.
Generally shouldn't be called directly. | train | https://github.com/opencivicdata/pupa/blob/18e0ddc4344804987ee0f2227bf600375538dbd5/pupa/scrape/base.py#L76-L106 | null | class Scraper(scrapelib.Scraper):
""" Base class for all scrapers """
def __init__(self, jurisdiction, datadir, *, strict_validation=True, fastmode=False):
super(Scraper, self).__init__()
# set options
self.jurisdiction = jurisdiction
self.datadir = datadir
# scrapelib setup
self.timeout = settings.SCRAPELIB_TIMEOUT
self.requests_per_minute = settings.SCRAPELIB_RPM
self.retry_attempts = settings.SCRAPELIB_RETRY_ATTEMPTS
self.retry_wait_seconds = settings.SCRAPELIB_RETRY_WAIT_SECONDS
self.verify = settings.SCRAPELIB_VERIFY
# caching
if settings.CACHE_DIR:
self.cache_storage = scrapelib.FileCache(settings.CACHE_DIR)
if fastmode:
self.requests_per_minute = 0
self.cache_write_only = False
# validation
self.strict_validation = strict_validation
# 'type' -> {set of names}
self.output_names = defaultdict(set)
# logging convenience methods
self.logger = logging.getLogger("pupa")
self.info = self.logger.info
self.debug = self.logger.debug
self.warning = self.logger.warning
self.error = self.logger.error
self.critical = self.logger.critical
def do_scrape(self, **kwargs):
record = {'objects': defaultdict(int)}
self.output_names = defaultdict(set)
record['start'] = utils.utcnow()
for obj in self.scrape(**kwargs) or []:
if hasattr(obj, '__iter__'):
for iterobj in obj:
self.save_object(iterobj)
else:
self.save_object(obj)
record['end'] = utils.utcnow()
record['skipped'] = getattr(self, 'skipped', 0)
if not self.output_names:
raise ScrapeError('no objects returned from {} scrape'.format(self.__class__.__name__))
for _type, nameset in self.output_names.items():
record['objects'][_type] += len(nameset)
return record
def latest_session(self):
return self.jurisdiction.legislative_sessions[-1]['identifier']
def scrape(self, **kwargs):
raise NotImplementedError(self.__class__.__name__ + ' must provide a scrape() method')
|
opencivicdata/pupa | pupa/scrape/base.py | BaseModel.validate | python | def validate(self, schema=None):
if schema is None:
schema = self._schema
type_checker = Draft3Validator.TYPE_CHECKER.redefine(
"datetime", lambda c, d: isinstance(d, (datetime.date, datetime.datetime))
)
ValidatorCls = jsonschema.validators.extend(Draft3Validator, type_checker=type_checker)
validator = ValidatorCls(schema, format_checker=FormatChecker())
errors = [str(error) for error in validator.iter_errors(self.as_dict())]
if errors:
raise ScrapeValueError('validation of {} {} failed: {}'.format(
self.__class__.__name__, self._id, '\n\t'+'\n\t'.join(errors)
)) | Validate that we have a valid object.
On error, this will raise a `ScrapeValueError`
This also expects that the schemas assume that omitting required
in the schema asserts the field is optional, not required. This is
due to upstream schemas being in JSON Schema v3, and not validictory's
modified syntax.
^ TODO: FIXME | train | https://github.com/opencivicdata/pupa/blob/18e0ddc4344804987ee0f2227bf600375538dbd5/pupa/scrape/base.py#L171-L196 | [
"def as_dict(self):\n d = {}\n for attr in self._schema['properties'].keys():\n if hasattr(self, attr):\n d[attr] = getattr(self, attr)\n d['_id'] = self._id\n return d\n"
] | class BaseModel(object):
"""
This is the base class for all the Open Civic objects. This contains
common methods and abstractions for OCD objects.
"""
# to be overridden by children. Something like "person" or "organization".
# Used in :func:`validate`.
_type = None
_schema = None
def __init__(self):
super(BaseModel, self).__init__()
self._id = str(uuid.uuid1())
self._related = []
self.extras = {}
# validation
def pre_save(self, jurisdiction_id):
pass
def as_dict(self):
d = {}
for attr in self._schema['properties'].keys():
if hasattr(self, attr):
d[attr] = getattr(self, attr)
d['_id'] = self._id
return d
# operators
def __setattr__(self, key, val):
if key[0] != '_' and key not in self._schema['properties'].keys():
raise ScrapeValueError('property "{}" not in {} schema'.format(key, self._type))
super(BaseModel, self).__setattr__(key, val)
|
opencivicdata/pupa | pupa/scrape/base.py | SourceMixin.add_source | python | def add_source(self, url, *, note=''):
new = {'url': url, 'note': note}
self.sources.append(new) | Add a source URL from which data was collected | train | https://github.com/opencivicdata/pupa/blob/18e0ddc4344804987ee0f2227bf600375538dbd5/pupa/scrape/base.py#L222-L225 | null | class SourceMixin(object):
def __init__(self):
super(SourceMixin, self).__init__()
self.sources = []
|
opencivicdata/pupa | pupa/importers/people.py | PersonImporter.limit_spec | python | def limit_spec(self, spec):
if list(spec.keys()) == ['name']:
# if we're just resolving on name, include other names
return ((Q(name=spec['name']) | Q(other_names__name=spec['name'])) &
Q(memberships__organization__jurisdiction_id=self.jurisdiction_id))
spec['memberships__organization__jurisdiction_id'] = self.jurisdiction_id
return spec | Whenever we do a Pseudo ID lookup from the database, we need to limit
based on the memberships -> organization -> jurisdiction, so we scope
the resolution. | train | https://github.com/opencivicdata/pupa/blob/18e0ddc4344804987ee0f2227bf600375538dbd5/pupa/importers/people.py#L37-L48 | null | class PersonImporter(BaseImporter):
_type = 'person'
model_class = Person
related_models = {'identifiers': (PersonIdentifier, 'person_id', {}),
'other_names': (PersonName, 'person_id', {}),
'contact_details': (PersonContactDetail, 'person_id', {}),
'links': (PersonLink, 'person_id', {}),
'sources': (PersonSource, 'person_id', {}),
}
def _prepare_imports(self, dicts):
dicts = list(super(PersonImporter, self)._prepare_imports(dicts))
by_name = defaultdict(list)
for _, person in dicts:
by_name[person['name']].append(person)
for other in person['other_names']:
by_name[other['name']].append(person)
# check for duplicates
for name, people in by_name.items():
if len(people) > 1:
for person in people:
if person['birth_date'] == '':
raise SameNameError(name)
return dicts
def get_object(self, person):
all_names = [person['name']] + [o['name'] for o in person['other_names']]
matches = list(self.model_class.objects.filter(
Q(memberships__organization__jurisdiction_id=self.jurisdiction_id),
(Q(name__in=all_names) | Q(other_names__name__in=all_names))
).distinct('id'))
matches_length = len(matches)
if matches_length == 1 and not matches[0].birth_date:
return matches[0]
elif matches_length == 0:
raise self.model_class.DoesNotExist(
'No Person: {} in {}'.format(all_names, self.jurisdiction_id))
else:
# Try and match based on birth_date.
if person['birth_date']:
for match in matches:
if person['birth_date'] and match.birth_date == person['birth_date']:
return match
# If we got here, no match based on birth_date, a new person?
raise self.model_class.DoesNotExist(
'No Person: {} in {} with birth_date {}'.format(
all_names, self.jurisdiction_id, person['birth_date']))
raise SameNameError(person['name'])
|
opencivicdata/pupa | pupa/importers/organizations.py | OrganizationImporter._prepare_imports | python | def _prepare_imports(self, dicts):
# all pseudo parent ids we've seen
pseudo_ids = set()
# pseudo matches
pseudo_matches = {}
# get prepared imports from parent
prepared = dict(super(OrganizationImporter, self)._prepare_imports(dicts))
# collect parent pseudo_ids
for _, data in prepared.items():
parent_id = data.get('parent_id', None) or ''
if parent_id.startswith('~'):
pseudo_ids.add(parent_id)
# turn pseudo_ids into a tuple of dictionaries
pseudo_ids = [(ppid, get_pseudo_id(ppid)) for ppid in pseudo_ids]
# loop over all data again, finding the pseudo ids true json id
for json_id, data in prepared.items():
# check if this matches one of our ppids
for ppid, spec in pseudo_ids:
match = True
for k, v in spec.items():
if data[k] != v:
match = False
break
if match:
if ppid in pseudo_matches:
raise UnresolvedIdError('multiple matches for pseudo id: ' + ppid)
pseudo_matches[ppid] = json_id
# toposort the nodes so parents are imported first
network = Network()
in_network = set()
import_order = []
for json_id, data in prepared.items():
parent_id = data.get('parent_id', None)
# resolve pseudo_ids to their json id before building the network
if parent_id in pseudo_matches:
parent_id = pseudo_matches[parent_id]
network.add_node(json_id)
if parent_id:
# Right. There's an import dep. We need to add the edge from
# the parent to the current node, so that we import the parent
# before the current node.
network.add_edge(parent_id, json_id)
# resolve the sorted import order
for jid in network.sort():
import_order.append((jid, prepared[jid]))
in_network.add(jid)
# ensure all data made it into network (paranoid check, should never fail)
if in_network != set(prepared.keys()): # pragma: no cover
raise PupaInternalError("import is missing nodes in network set")
return import_order | an override for prepare imports that sorts the imports by parent_id dependencies | train | https://github.com/opencivicdata/pupa/blob/18e0ddc4344804987ee0f2227bf600375538dbd5/pupa/importers/organizations.py#L61-L122 | null | class OrganizationImporter(BaseImporter):
_type = 'organization'
model_class = Organization
related_models = {'identifiers': (OrganizationIdentifier, 'organization_id', {}),
'other_names': (OrganizationName, 'organization_id', {}),
'contact_details': (OrganizationContactDetail, 'organization_id', {}),
'links': (OrganizationLink, 'organization_id', {}),
'sources': (OrganizationSource, 'organization_id', {}),
}
def get_object(self, org):
spec = {'classification': org['classification'],
'parent_id': org['parent_id']}
# add jurisdiction_id unless this is a party
jid = org.get('jurisdiction_id')
if jid:
spec['jurisdiction_id'] = jid
all_names = [org['name']] + [o['name'] for o in org['other_names']]
query = (Q(**spec) &
(Q(name__in=all_names) | Q(other_names__name__in=all_names)))
matches = list(self.model_class.objects.filter(query).distinct('id'))
matches_length = len(matches)
if matches_length == 1:
return matches[0]
elif matches_length == 0:
raise self.model_class.DoesNotExist(
'No Organization: {} in {}'.format(all_names, self.jurisdiction_id))
else:
raise SameOrgNameError(org['name'])
def prepare_for_db(self, data):
data['parent_id'] = self.resolve_json_id(data['parent_id'])
if data['classification'] != 'party':
data['jurisdiction_id'] = self.jurisdiction_id
return data
def limit_spec(self, spec):
if spec.get('classification') != 'party':
spec['jurisdiction_id'] = self.jurisdiction_id
name = spec.pop('name', None)
if name:
return (Q(**spec) &
(Q(name=name) | Q(other_names__name=name)))
return spec
|
openeemeter/eeweather | eeweather/geo.py | get_lat_long_climate_zones | python | def get_lat_long_climate_zones(latitude, longitude):
try:
from shapely.geometry import Point
except ImportError: # pragma: no cover
raise ImportError("Finding climate zone of lat/long points requires shapely.")
(
iecc_climate_zones,
iecc_moisture_regimes,
ba_climate_zones,
ca_climate_zones,
) = cached_data.climate_zone_geometry
point = Point(longitude, latitude) # x,y
climate_zones = {}
for iecc_climate_zone, shape in iecc_climate_zones:
if shape.contains(point):
climate_zones["iecc_climate_zone"] = iecc_climate_zone
break
else:
climate_zones["iecc_climate_zone"] = None
for iecc_moisture_regime, shape in iecc_moisture_regimes:
if shape.contains(point):
climate_zones["iecc_moisture_regime"] = iecc_moisture_regime
break
else:
climate_zones["iecc_moisture_regime"] = None
for ba_climate_zone, shape in ba_climate_zones:
if shape.contains(point):
climate_zones["ba_climate_zone"] = ba_climate_zone
break
else:
climate_zones["ba_climate_zone"] = None
for ca_climate_zone, shape in ca_climate_zones:
if shape.contains(point):
climate_zones["ca_climate_zone"] = ca_climate_zone
break
else:
climate_zones["ca_climate_zone"] = None
return climate_zones | Get climate zones that contain lat/long coordinates.
Parameters
----------
latitude : float
Latitude of point.
longitude : float
Longitude of point.
Returns
-------
climate_zones: dict of str
Region ids for each climate zone type. | train | https://github.com/openeemeter/eeweather/blob/d32b7369b26edfa3ee431c60457afeb0593123a7/eeweather/geo.py#L104-L161 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2018 Open Energy Efficiency, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
from .connections import metadata_db_connection_proxy
from .exceptions import UnrecognizedUSAFIDError, UnrecognizedZCTAError
from .utils import lazy_property
from .validation import valid_zcta_or_raise
__all__ = ("get_lat_long_climate_zones", "get_zcta_metadata", "zcta_to_lat_long")
class CachedData(object):
@lazy_property
def climate_zone_geometry(self):
try:
from shapely.geometry import shape
except ImportError: # pragma: no cover
raise ImportError(
"Matching by lat/lng within climate zone requires shapely"
)
conn = metadata_db_connection_proxy.get_connection()
cur = conn.cursor()
cur.execute(
"""
select
iecc_climate_zone, geometry
from
iecc_climate_zone_metadata
"""
)
iecc_climate_zones = [
(cz_id, shape(json.loads(geometry))) for (cz_id, geometry) in cur.fetchall()
]
cur.execute(
"""
select
iecc_moisture_regime, geometry
from
iecc_moisture_regime_metadata
"""
)
iecc_moisture_regimes = [
(cz_id, shape(json.loads(geometry))) for (cz_id, geometry) in cur.fetchall()
]
cur.execute(
"""
select
ba_climate_zone, geometry
from
ba_climate_zone_metadata
"""
)
ba_climate_zones = [
(cz_id, shape(json.loads(geometry))) for (cz_id, geometry) in cur.fetchall()
]
cur.execute(
"""
select
ca_climate_zone, geometry
from
ca_climate_zone_metadata
"""
)
ca_climate_zones = [
(cz_id, shape(json.loads(geometry))) for (cz_id, geometry) in cur.fetchall()
]
return (
iecc_climate_zones,
iecc_moisture_regimes,
ba_climate_zones,
ca_climate_zones,
)
cached_data = CachedData()
def get_zcta_metadata(zcta):
""" Get metadata about a ZIP Code Tabulation Area (ZCTA).
Parameters
----------
zcta : str
ID of ZIP Code Tabulation Area
Returns
-------
metadata : dict
Dict of data about the ZCTA, including lat/long coordinates.
"""
conn = metadata_db_connection_proxy.get_connection()
cur = conn.cursor()
cur.execute(
"""
select
*
from
zcta_metadata
where
zcta_id = ?
""",
(zcta,),
)
row = cur.fetchone()
if row is None:
raise UnrecognizedZCTAError(zcta)
return {col[0]: row[i] for i, col in enumerate(cur.description)}
def zcta_to_lat_long(zcta):
"""Get location of ZCTA centroid
Retrieves latitude and longitude of centroid of ZCTA
to use for matching with weather station.
Parameters
----------
zcta : str
ID of the target ZCTA.
Returns
-------
latitude : float
Latitude of centroid of ZCTA.
longitude : float
Target Longitude of centroid of ZCTA.
"""
valid_zcta_or_raise(zcta)
conn = metadata_db_connection_proxy.get_connection()
cur = conn.cursor()
cur.execute(
"""
select
latitude
, longitude
from
zcta_metadata
where
zcta_id = ?
""",
(zcta,),
)
# match existence checked in validate_zcta_or_raise(zcta)
latitude, longitude = cur.fetchone()
return float(latitude), float(longitude)
|
openeemeter/eeweather | eeweather/geo.py | get_zcta_metadata | python | def get_zcta_metadata(zcta):
conn = metadata_db_connection_proxy.get_connection()
cur = conn.cursor()
cur.execute(
"""
select
*
from
zcta_metadata
where
zcta_id = ?
""",
(zcta,),
)
row = cur.fetchone()
if row is None:
raise UnrecognizedZCTAError(zcta)
return {col[0]: row[i] for i, col in enumerate(cur.description)} | Get metadata about a ZIP Code Tabulation Area (ZCTA).
Parameters
----------
zcta : str
ID of ZIP Code Tabulation Area
Returns
-------
metadata : dict
Dict of data about the ZCTA, including lat/long coordinates. | train | https://github.com/openeemeter/eeweather/blob/d32b7369b26edfa3ee431c60457afeb0593123a7/eeweather/geo.py#L164-L193 | [
"def get_connection(self):\n if self._connection is None:\n self._connection = sqlite3.connect(self.db_path)\n return self._connection\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2018 Open Energy Efficiency, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
from .connections import metadata_db_connection_proxy
from .exceptions import UnrecognizedUSAFIDError, UnrecognizedZCTAError
from .utils import lazy_property
from .validation import valid_zcta_or_raise
__all__ = ("get_lat_long_climate_zones", "get_zcta_metadata", "zcta_to_lat_long")
class CachedData(object):
@lazy_property
def climate_zone_geometry(self):
try:
from shapely.geometry import shape
except ImportError: # pragma: no cover
raise ImportError(
"Matching by lat/lng within climate zone requires shapely"
)
conn = metadata_db_connection_proxy.get_connection()
cur = conn.cursor()
cur.execute(
"""
select
iecc_climate_zone, geometry
from
iecc_climate_zone_metadata
"""
)
iecc_climate_zones = [
(cz_id, shape(json.loads(geometry))) for (cz_id, geometry) in cur.fetchall()
]
cur.execute(
"""
select
iecc_moisture_regime, geometry
from
iecc_moisture_regime_metadata
"""
)
iecc_moisture_regimes = [
(cz_id, shape(json.loads(geometry))) for (cz_id, geometry) in cur.fetchall()
]
cur.execute(
"""
select
ba_climate_zone, geometry
from
ba_climate_zone_metadata
"""
)
ba_climate_zones = [
(cz_id, shape(json.loads(geometry))) for (cz_id, geometry) in cur.fetchall()
]
cur.execute(
"""
select
ca_climate_zone, geometry
from
ca_climate_zone_metadata
"""
)
ca_climate_zones = [
(cz_id, shape(json.loads(geometry))) for (cz_id, geometry) in cur.fetchall()
]
return (
iecc_climate_zones,
iecc_moisture_regimes,
ba_climate_zones,
ca_climate_zones,
)
cached_data = CachedData()
def get_lat_long_climate_zones(latitude, longitude):
""" Get climate zones that contain lat/long coordinates.
Parameters
----------
latitude : float
Latitude of point.
longitude : float
Longitude of point.
Returns
-------
climate_zones: dict of str
Region ids for each climate zone type.
"""
try:
from shapely.geometry import Point
except ImportError: # pragma: no cover
raise ImportError("Finding climate zone of lat/long points requires shapely.")
(
iecc_climate_zones,
iecc_moisture_regimes,
ba_climate_zones,
ca_climate_zones,
) = cached_data.climate_zone_geometry
point = Point(longitude, latitude) # x,y
climate_zones = {}
for iecc_climate_zone, shape in iecc_climate_zones:
if shape.contains(point):
climate_zones["iecc_climate_zone"] = iecc_climate_zone
break
else:
climate_zones["iecc_climate_zone"] = None
for iecc_moisture_regime, shape in iecc_moisture_regimes:
if shape.contains(point):
climate_zones["iecc_moisture_regime"] = iecc_moisture_regime
break
else:
climate_zones["iecc_moisture_regime"] = None
for ba_climate_zone, shape in ba_climate_zones:
if shape.contains(point):
climate_zones["ba_climate_zone"] = ba_climate_zone
break
else:
climate_zones["ba_climate_zone"] = None
for ca_climate_zone, shape in ca_climate_zones:
if shape.contains(point):
climate_zones["ca_climate_zone"] = ca_climate_zone
break
else:
climate_zones["ca_climate_zone"] = None
return climate_zones
def zcta_to_lat_long(zcta):
"""Get location of ZCTA centroid
Retrieves latitude and longitude of centroid of ZCTA
to use for matching with weather station.
Parameters
----------
zcta : str
ID of the target ZCTA.
Returns
-------
latitude : float
Latitude of centroid of ZCTA.
longitude : float
Target Longitude of centroid of ZCTA.
"""
valid_zcta_or_raise(zcta)
conn = metadata_db_connection_proxy.get_connection()
cur = conn.cursor()
cur.execute(
"""
select
latitude
, longitude
from
zcta_metadata
where
zcta_id = ?
""",
(zcta,),
)
# match existence checked in validate_zcta_or_raise(zcta)
latitude, longitude = cur.fetchone()
return float(latitude), float(longitude)
|
openeemeter/eeweather | eeweather/geo.py | zcta_to_lat_long | python | def zcta_to_lat_long(zcta):
valid_zcta_or_raise(zcta)
conn = metadata_db_connection_proxy.get_connection()
cur = conn.cursor()
cur.execute(
"""
select
latitude
, longitude
from
zcta_metadata
where
zcta_id = ?
""",
(zcta,),
)
# match existence checked in validate_zcta_or_raise(zcta)
latitude, longitude = cur.fetchone()
return float(latitude), float(longitude) | Get location of ZCTA centroid
Retrieves latitude and longitude of centroid of ZCTA
to use for matching with weather station.
Parameters
----------
zcta : str
ID of the target ZCTA.
Returns
-------
latitude : float
Latitude of centroid of ZCTA.
longitude : float
Target Longitude of centroid of ZCTA. | train | https://github.com/openeemeter/eeweather/blob/d32b7369b26edfa3ee431c60457afeb0593123a7/eeweather/geo.py#L196-L234 | [
"def valid_zcta_or_raise(zcta):\n \"\"\" Check if ZCTA is valid and raise eeweather.UnrecognizedZCTAError if not. \"\"\"\n conn = metadata_db_connection_proxy.get_connection()\n cur = conn.cursor()\n\n cur.execute(\n \"\"\"\n select exists (\n select\n zcta_id\n from\n... | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2018 Open Energy Efficiency, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
from .connections import metadata_db_connection_proxy
from .exceptions import UnrecognizedUSAFIDError, UnrecognizedZCTAError
from .utils import lazy_property
from .validation import valid_zcta_or_raise
__all__ = ("get_lat_long_climate_zones", "get_zcta_metadata", "zcta_to_lat_long")
class CachedData(object):
@lazy_property
def climate_zone_geometry(self):
try:
from shapely.geometry import shape
except ImportError: # pragma: no cover
raise ImportError(
"Matching by lat/lng within climate zone requires shapely"
)
conn = metadata_db_connection_proxy.get_connection()
cur = conn.cursor()
cur.execute(
"""
select
iecc_climate_zone, geometry
from
iecc_climate_zone_metadata
"""
)
iecc_climate_zones = [
(cz_id, shape(json.loads(geometry))) for (cz_id, geometry) in cur.fetchall()
]
cur.execute(
"""
select
iecc_moisture_regime, geometry
from
iecc_moisture_regime_metadata
"""
)
iecc_moisture_regimes = [
(cz_id, shape(json.loads(geometry))) for (cz_id, geometry) in cur.fetchall()
]
cur.execute(
"""
select
ba_climate_zone, geometry
from
ba_climate_zone_metadata
"""
)
ba_climate_zones = [
(cz_id, shape(json.loads(geometry))) for (cz_id, geometry) in cur.fetchall()
]
cur.execute(
"""
select
ca_climate_zone, geometry
from
ca_climate_zone_metadata
"""
)
ca_climate_zones = [
(cz_id, shape(json.loads(geometry))) for (cz_id, geometry) in cur.fetchall()
]
return (
iecc_climate_zones,
iecc_moisture_regimes,
ba_climate_zones,
ca_climate_zones,
)
cached_data = CachedData()
def get_lat_long_climate_zones(latitude, longitude):
""" Get climate zones that contain lat/long coordinates.
Parameters
----------
latitude : float
Latitude of point.
longitude : float
Longitude of point.
Returns
-------
climate_zones: dict of str
Region ids for each climate zone type.
"""
try:
from shapely.geometry import Point
except ImportError: # pragma: no cover
raise ImportError("Finding climate zone of lat/long points requires shapely.")
(
iecc_climate_zones,
iecc_moisture_regimes,
ba_climate_zones,
ca_climate_zones,
) = cached_data.climate_zone_geometry
point = Point(longitude, latitude) # x,y
climate_zones = {}
for iecc_climate_zone, shape in iecc_climate_zones:
if shape.contains(point):
climate_zones["iecc_climate_zone"] = iecc_climate_zone
break
else:
climate_zones["iecc_climate_zone"] = None
for iecc_moisture_regime, shape in iecc_moisture_regimes:
if shape.contains(point):
climate_zones["iecc_moisture_regime"] = iecc_moisture_regime
break
else:
climate_zones["iecc_moisture_regime"] = None
for ba_climate_zone, shape in ba_climate_zones:
if shape.contains(point):
climate_zones["ba_climate_zone"] = ba_climate_zone
break
else:
climate_zones["ba_climate_zone"] = None
for ca_climate_zone, shape in ca_climate_zones:
if shape.contains(point):
climate_zones["ca_climate_zone"] = ca_climate_zone
break
else:
climate_zones["ca_climate_zone"] = None
return climate_zones
def get_zcta_metadata(zcta):
""" Get metadata about a ZIP Code Tabulation Area (ZCTA).
Parameters
----------
zcta : str
ID of ZIP Code Tabulation Area
Returns
-------
metadata : dict
Dict of data about the ZCTA, including lat/long coordinates.
"""
conn = metadata_db_connection_proxy.get_connection()
cur = conn.cursor()
cur.execute(
"""
select
*
from
zcta_metadata
where
zcta_id = ?
""",
(zcta,),
)
row = cur.fetchone()
if row is None:
raise UnrecognizedZCTAError(zcta)
return {col[0]: row[i] for i, col in enumerate(cur.description)}
|
openeemeter/eeweather | eeweather/summaries.py | get_zcta_ids | python | def get_zcta_ids(state=None):
conn = metadata_db_connection_proxy.get_connection()
cur = conn.cursor()
if state is None:
cur.execute(
"""
select zcta_id from zcta_metadata
"""
)
else:
cur.execute(
"""
select zcta_id from zcta_metadata where state = ?
""",
(state,),
)
return [row[0] for row in cur.fetchall()] | Get ids of all supported ZCTAs, optionally by state.
Parameters
----------
state : str, optional
Select zipcodes only from this state or territory, given as 2-letter
abbreviation (e.g., ``'CA'``, ``'PR'``).
Returns
-------
results : list of str
List of all supported selected ZCTA IDs. | train | https://github.com/openeemeter/eeweather/blob/d32b7369b26edfa3ee431c60457afeb0593123a7/eeweather/summaries.py#L25-L55 | [
"def get_connection(self):\n if self._connection is None:\n self._connection = sqlite3.connect(self.db_path)\n return self._connection\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2018 Open Energy Efficiency, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .connections import metadata_db_connection_proxy
__all__ = ("get_zcta_ids", "get_isd_station_usaf_ids")
def get_isd_station_usaf_ids(state=None):
""" Get USAF IDs of all supported ISD stations, optionally by state.
Parameters
----------
state : str, optional
Select ISD station USAF IDs only from this state or territory, given
as 2-letter abbreviation (e.g., ``'CA'``, ``'PR'``).
Returns
-------
results : list of str
List of all supported selected ISD station USAF IDs.
"""
conn = metadata_db_connection_proxy.get_connection()
cur = conn.cursor()
if state is None:
cur.execute(
"""
select usaf_id from isd_station_metadata
"""
)
else:
cur.execute(
"""
select usaf_id from isd_station_metadata where state = ?
""",
(state,),
)
return [row[0] for row in cur.fetchall()]
|
openeemeter/eeweather | eeweather/validation.py | valid_zcta_or_raise | python | def valid_zcta_or_raise(zcta):
conn = metadata_db_connection_proxy.get_connection()
cur = conn.cursor()
cur.execute(
"""
select exists (
select
zcta_id
from
zcta_metadata
where
zcta_id = ?
)
""",
(zcta,),
)
(exists,) = cur.fetchone()
if exists:
return True
else:
raise UnrecognizedZCTAError(zcta) | Check if ZCTA is valid and raise eeweather.UnrecognizedZCTAError if not. | train | https://github.com/openeemeter/eeweather/blob/d32b7369b26edfa3ee431c60457afeb0593123a7/eeweather/validation.py#L27-L49 | [
"def get_connection(self):\n if self._connection is None:\n self._connection = sqlite3.connect(self.db_path)\n return self._connection\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2018 Open Energy Efficiency, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .connections import metadata_db_connection_proxy
from .exceptions import UnrecognizedZCTAError, UnrecognizedUSAFIDError
__all__ = ("valid_zcta_or_raise", "valid_usaf_id_or_raise")
def valid_usaf_id_or_raise(usaf_id):
""" Check if USAF ID is valid and raise eeweather.UnrecognizedUSAFIDError if not. """
conn = metadata_db_connection_proxy.get_connection()
cur = conn.cursor()
cur.execute(
"""
select exists (
select
usaf_id
from
isd_station_metadata
where
usaf_id = ?
)
""",
(usaf_id,),
)
(exists,) = cur.fetchone()
if exists:
return True
else:
raise UnrecognizedUSAFIDError(usaf_id)
|
openeemeter/eeweather | eeweather/validation.py | valid_usaf_id_or_raise | python | def valid_usaf_id_or_raise(usaf_id):
conn = metadata_db_connection_proxy.get_connection()
cur = conn.cursor()
cur.execute(
"""
select exists (
select
usaf_id
from
isd_station_metadata
where
usaf_id = ?
)
""",
(usaf_id,),
)
(exists,) = cur.fetchone()
if exists:
return True
else:
raise UnrecognizedUSAFIDError(usaf_id) | Check if USAF ID is valid and raise eeweather.UnrecognizedUSAFIDError if not. | train | https://github.com/openeemeter/eeweather/blob/d32b7369b26edfa3ee431c60457afeb0593123a7/eeweather/validation.py#L52-L74 | [
"def get_connection(self):\n if self._connection is None:\n self._connection = sqlite3.connect(self.db_path)\n return self._connection\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2018 Open Energy Efficiency, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .connections import metadata_db_connection_proxy
from .exceptions import UnrecognizedZCTAError, UnrecognizedUSAFIDError
__all__ = ("valid_zcta_or_raise", "valid_usaf_id_or_raise")
def valid_zcta_or_raise(zcta):
""" Check if ZCTA is valid and raise eeweather.UnrecognizedZCTAError if not. """
conn = metadata_db_connection_proxy.get_connection()
cur = conn.cursor()
cur.execute(
"""
select exists (
select
zcta_id
from
zcta_metadata
where
zcta_id = ?
)
""",
(zcta,),
)
(exists,) = cur.fetchone()
if exists:
return True
else:
raise UnrecognizedZCTAError(zcta)
|
openeemeter/eeweather | eeweather/ranking.py | rank_stations | python | def rank_stations(
site_latitude,
site_longitude,
site_state=None,
site_elevation=None,
match_iecc_climate_zone=False,
match_iecc_moisture_regime=False,
match_ba_climate_zone=False,
match_ca_climate_zone=False,
match_state=False,
minimum_quality=None,
minimum_tmy3_class=None,
max_distance_meters=None,
max_difference_elevation_meters=None,
is_tmy3=None,
is_cz2010=None,
):
candidates = cached_data.all_station_metadata
# compute distances
candidates_defined_lat_long = candidates[
candidates.latitude.notnull() & candidates.longitude.notnull()
]
candidates_latitude = candidates_defined_lat_long.latitude
candidates_longitude = candidates_defined_lat_long.longitude
tiled_site_latitude = np.tile(site_latitude, candidates_latitude.shape)
tiled_site_longitude = np.tile(site_longitude, candidates_longitude.shape)
geod = pyproj.Geod(ellps="WGS84")
dists = geod.inv(
tiled_site_longitude,
tiled_site_latitude,
candidates_longitude.values,
candidates_latitude.values,
)[2]
distance_meters = pd.Series(dists, index=candidates_defined_lat_long.index).reindex(
candidates.index
)
candidates["distance_meters"] = distance_meters
if site_elevation is not None:
difference_elevation_meters = (candidates.elevation - site_elevation).abs()
else:
difference_elevation_meters = None
candidates["difference_elevation_meters"] = difference_elevation_meters
site_climate_zones = get_lat_long_climate_zones(site_latitude, site_longitude)
site_iecc_climate_zone = site_climate_zones["iecc_climate_zone"]
site_iecc_moisture_regime = site_climate_zones["iecc_moisture_regime"]
site_ca_climate_zone = site_climate_zones["ca_climate_zone"]
site_ba_climate_zone = site_climate_zones["ba_climate_zone"]
# create filters
filters = []
if match_iecc_climate_zone:
if site_iecc_climate_zone is None:
filters.append(candidates.iecc_climate_zone.isnull())
else:
filters.append(candidates.iecc_climate_zone == site_iecc_climate_zone)
if match_iecc_moisture_regime:
if site_iecc_moisture_regime is None:
filters.append(candidates.iecc_moisture_regime.isnull())
else:
filters.append(candidates.iecc_moisture_regime == site_iecc_moisture_regime)
if match_ba_climate_zone:
if site_ba_climate_zone is None:
filters.append(candidates.ba_climate_zone.isnull())
else:
filters.append(candidates.ba_climate_zone == site_ba_climate_zone)
if match_ca_climate_zone:
if site_ca_climate_zone is None:
filters.append(candidates.ca_climate_zone.isnull())
else:
filters.append(candidates.ca_climate_zone == site_ca_climate_zone)
if match_state:
if site_state is None:
filters.append(candidates.state.isnull())
else:
filters.append(candidates.state == site_state)
if is_tmy3 is not None:
filters.append(candidates.is_tmy3.isin([is_tmy3]))
if is_cz2010 is not None:
filters.append(candidates.is_cz2010.isin([is_cz2010]))
if minimum_quality == "low":
filters.append(candidates.rough_quality.isin(["high", "medium", "low"]))
elif minimum_quality == "medium":
filters.append(candidates.rough_quality.isin(["high", "medium"]))
elif minimum_quality == "high":
filters.append(candidates.rough_quality.isin(["high"]))
if minimum_tmy3_class == "III":
filters.append(candidates.tmy3_class.isin(["I", "II", "III"]))
elif minimum_tmy3_class == "II":
filters.append(candidates.tmy3_class.isin(["I", "II"]))
elif minimum_tmy3_class == "I":
filters.append(candidates.tmy3_class.isin(["I"]))
if max_distance_meters is not None:
filters.append(candidates.distance_meters <= max_distance_meters)
if max_difference_elevation_meters is not None and site_elevation is not None:
filters.append(
candidates.difference_elevation_meters <= max_difference_elevation_meters
)
combined_filters = _combine_filters(filters, candidates.index)
filtered_candidates = candidates[combined_filters]
ranked_filtered_candidates = filtered_candidates.sort_values(by=["distance_meters"])
# add rank column
ranks = range(1, 1 + len(ranked_filtered_candidates))
ranked_filtered_candidates.insert(0, "rank", ranks)
return ranked_filtered_candidates[
[
"rank",
"distance_meters",
"latitude",
"longitude",
"iecc_climate_zone",
"iecc_moisture_regime",
"ba_climate_zone",
"ca_climate_zone",
"rough_quality",
"elevation",
"state",
"tmy3_class",
"is_tmy3",
"is_cz2010",
"difference_elevation_meters",
]
] | Get a ranked, filtered set of candidate weather stations and metadata
for a particular site.
Parameters
----------
site_latitude : float
Latitude of target site for which to find candidate weather stations.
site_longitude : float
Longitude of target site for which to find candidate weather stations.
site_state : str, 2 letter abbreviation
US state of target site, used optionally to filter potential candidate
weather stations. Ignored unless ``match_state=True``.
site_elevation : float
Elevation of target site in meters, used optionally to filter potential
candidate weather stations. Ignored unless
``max_difference_elevation_meters`` is set.
match_iecc_climate_zone : bool
If ``True``, filter candidate weather stations to those
matching the IECC climate zone of the target site.
match_iecc_moisture_regime : bool
If ``True``, filter candidate weather stations to those
matching the IECC moisture regime of the target site.
match_ca_climate_zone : bool
If ``True``, filter candidate weather stations to those
matching the CA climate zone of the target site.
match_ba_climate_zone : bool
If ``True``, filter candidate weather stations to those
matching the Building America climate zone of the target site.
match_state : bool
If ``True``, filter candidate weather stations to those
matching the US state of the target site, as specified by
``site_state=True``.
minimum_quality : str, ``'high'``, ``'medium'``, ``'low'``
If given, filter candidate weather stations to those meeting or
exceeding the given quality, as summarized by the frequency and
availability of observations in the NOAA Integrated Surface Database.
minimum_tmy3_class : str, ``'I'``, ``'II'``, ``'III'``
If given, filter candidate weather stations to those meeting or
exceeding the given class, as reported in the NREL TMY3 metadata.
max_distance_meters : float
If given, filter candidate weather stations to those within the
``max_distance_meters`` of the target site location.
max_difference_elevation_meters : float
If given, filter candidate weather stations to those with elevations
within ``max_difference_elevation_meters`` of the target site elevation.
is_tmy3 : bool
If given, filter candidate weather stations to those for which TMY3
normal year temperature data is available.
is_cz2010 : bool
If given, filter candidate weather stations to those for which CZ2010
normal year temperature data is available.
Returns
-------
ranked_filtered_candidates : :any:`pandas.DataFrame`
Index is ``usaf_id``. Each row contains a potential weather station
match and metadata. Contains the following columns:
- ``rank``: Rank of weather station match for the target site.
- ``distance_meters``: Distance from target site to weather station site.
- ``latitude``: Latitude of weather station site.
- ``longitude``: Longitude of weather station site.
- ``iecc_climate_zone``: IECC Climate Zone ID (1-8)
- ``iecc_moisture_regime``: IECC Moisture Regime ID (A-C)
- ``ba_climate_zone``: Building America climate zone name
- ``ca_climate_zone``: Califoria climate zone number
- ``rough_quality``: Approximate measure of frequency of ISD
observations data at weather station.
- ``elevation``: Elevation of weather station site, if available.
- ``state``: US state of weather station site, if applicable.
- ``tmy3_class``: Weather station class as reported by NREL TMY3, if
available
- ``is_tmy3``: Weather station has associated TMY3 data.
- ``is_cz2010``: Weather station has associated CZ2010 data.
- ``difference_elevation_meters``: Absolute difference in meters
between target site elevation and weather station elevation, if
available. | train | https://github.com/openeemeter/eeweather/blob/d32b7369b26edfa3ee431c60457afeb0593123a7/eeweather/ranking.py#L107-L320 | [
"def get_lat_long_climate_zones(latitude, longitude):\n \"\"\" Get climate zones that contain lat/long coordinates.\n\n Parameters\n ----------\n latitude : float\n Latitude of point.\n longitude : float\n Longitude of point.\n\n Returns\n -------\n climate_zones: dict of str\n... | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2018 Open Energy Efficiency, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pandas as pd
import numpy as np
import pyproj
import eeweather.mockable
from .exceptions import ISDDataNotAvailableError
from .connections import metadata_db_connection_proxy
from .geo import get_lat_long_climate_zones
from .stations import ISDStation
from .utils import lazy_property
from .warnings import EEWeatherWarning
__all__ = ("rank_stations", "combine_ranked_stations", "select_station")
class CachedData(object):
@lazy_property
def all_station_metadata(self):
conn = metadata_db_connection_proxy.get_connection()
cur = conn.cursor()
cur.execute(
"""
select
isd.usaf_id
, isd.latitude
, isd.longitude
, isd.iecc_climate_zone
, isd.iecc_moisture_regime
, isd.ba_climate_zone
, isd.ca_climate_zone
, isd.quality as rough_quality
, isd.elevation
, isd.state
, tmy3.class as tmy3_class
, tmy3.usaf_id is not null as is_tmy3
, cz2010.usaf_id is not null as is_cz2010
from
isd_station_metadata as isd
left join cz2010_station_metadata as cz2010 on
isd.usaf_id = cz2010.usaf_id
left join tmy3_station_metadata as tmy3 on
isd.usaf_id = tmy3.usaf_id
order by
isd.usaf_id
"""
)
df = pd.DataFrame(
[
{col[0]: val for col, val in zip(cur.description, row)}
for row in cur.fetchall()
],
columns=[
"usaf_id",
"latitude",
"longitude",
"iecc_climate_zone",
"iecc_moisture_regime",
"ba_climate_zone",
"ca_climate_zone",
"rough_quality",
"elevation",
"state",
"tmy3_class",
"is_tmy3",
"is_cz2010",
],
).set_index("usaf_id")
df["latitude"] = df.latitude.astype(float)
df["longitude"] = df.longitude.astype(float)
df["elevation"] = df.elevation.astype(float)
df["is_tmy3"] = df.is_tmy3.astype(bool)
df["is_cz2010"] = df.is_cz2010.astype(bool)
return df
cached_data = CachedData()
def _combine_filters(filters, index):
combined_filters = pd.Series(True, index=index)
for f in filters:
combined_filters &= f
return combined_filters
def combine_ranked_stations(rankings):
""" Combine :any:`pandas.DataFrame` s of candidate weather stations to form
a hybrid ranking dataframe.
Parameters
----------
rankings : list of :any:`pandas.DataFrame`
Dataframes of ranked weather station candidates and metadata.
All ranking dataframes should have the same columns and must be
sorted by rank.
Returns
-------
ranked_filtered_candidates : :any:`pandas.DataFrame`
Dataframe has a rank column and the same columns given in the source
dataframes.
"""
if len(rankings) == 0:
raise ValueError("Requires at least one ranking.")
combined_ranking = rankings[0]
for ranking in rankings[1:]:
filtered_ranking = ranking[~ranking.index.isin(combined_ranking.index)]
combined_ranking = pd.concat([combined_ranking, filtered_ranking])
combined_ranking["rank"] = range(1, 1 + len(combined_ranking))
return combined_ranking
@eeweather.mockable.mockable()
def load_isd_hourly_temp_data(station, start_date, end_date): # pragma: no cover
return station.load_isd_hourly_temp_data(start_date, end_date)
def select_station(
candidates,
coverage_range=None,
min_fraction_coverage=0.9,
distance_warnings=(50000, 200000),
rank=1,
):
""" Select a station from a list of candidates that meets given data
quality criteria.
Parameters
----------
candidates : :any:`pandas.DataFrame`
A dataframe of the form given by :any:`eeweather.rank_stations` or
:any:`eeweather.combine_ranked_stations`, specifically having at least
an index with ``usaf_id`` values and the column ``distance_meters``.
Returns
-------
isd_station, warnings : tuple of (:any:`eeweather.ISDStation`, list of str)
A qualified weather station. ``None`` if no station meets criteria.
"""
def _test_station(station):
if coverage_range is None:
return True, []
else:
start_date, end_date = coverage_range
try:
tempC, warnings = eeweather.mockable.load_isd_hourly_temp_data(
station, start_date, end_date
)
except ISDDataNotAvailableError:
return False, [] # reject
# TODO(philngo): also need to incorporate within-day limits
if len(tempC) > 0:
fraction_coverage = tempC.notnull().sum() / float(len(tempC))
return (fraction_coverage > min_fraction_coverage), warnings
else:
return False, [] # reject
def _station_warnings(station, distance_meters):
return [
EEWeatherWarning(
qualified_name="eeweather.exceeds_maximum_distance",
description=(
"Distance from target to weather station is greater"
"than the specified km."
),
data={
"distance_meters": distance_meters,
"max_distance_meters": d,
"rank": rank,
},
)
for d in distance_warnings
if distance_meters > d
]
n_stations_passed = 0
for usaf_id, row in candidates.iterrows():
station = ISDStation(usaf_id)
test_result, warnings = _test_station(station)
if test_result:
n_stations_passed += 1
if n_stations_passed == rank:
if not warnings:
warnings = []
warnings.extend(_station_warnings(station, row.distance_meters))
return station, warnings
no_station_warning = EEWeatherWarning(
qualified_name="eeweather.no_weather_station_selected",
description=(
"No weather station found with the specified rank and"
" minimum fracitional coverage."
),
data={"rank": rank, "min_fraction_coverage": min_fraction_coverage},
)
return None, [no_station_warning]
|
openeemeter/eeweather | eeweather/ranking.py | combine_ranked_stations | python | def combine_ranked_stations(rankings):
if len(rankings) == 0:
raise ValueError("Requires at least one ranking.")
combined_ranking = rankings[0]
for ranking in rankings[1:]:
filtered_ranking = ranking[~ranking.index.isin(combined_ranking.index)]
combined_ranking = pd.concat([combined_ranking, filtered_ranking])
combined_ranking["rank"] = range(1, 1 + len(combined_ranking))
return combined_ranking | Combine :any:`pandas.DataFrame` s of candidate weather stations to form
a hybrid ranking dataframe.
Parameters
----------
rankings : list of :any:`pandas.DataFrame`
Dataframes of ranked weather station candidates and metadata.
All ranking dataframes should have the same columns and must be
sorted by rank.
Returns
-------
ranked_filtered_candidates : :any:`pandas.DataFrame`
Dataframe has a rank column and the same columns given in the source
dataframes. | train | https://github.com/openeemeter/eeweather/blob/d32b7369b26edfa3ee431c60457afeb0593123a7/eeweather/ranking.py#L323-L350 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2018 Open Energy Efficiency, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pandas as pd
import numpy as np
import pyproj
import eeweather.mockable
from .exceptions import ISDDataNotAvailableError
from .connections import metadata_db_connection_proxy
from .geo import get_lat_long_climate_zones
from .stations import ISDStation
from .utils import lazy_property
from .warnings import EEWeatherWarning
__all__ = ("rank_stations", "combine_ranked_stations", "select_station")
class CachedData(object):
@lazy_property
def all_station_metadata(self):
conn = metadata_db_connection_proxy.get_connection()
cur = conn.cursor()
cur.execute(
"""
select
isd.usaf_id
, isd.latitude
, isd.longitude
, isd.iecc_climate_zone
, isd.iecc_moisture_regime
, isd.ba_climate_zone
, isd.ca_climate_zone
, isd.quality as rough_quality
, isd.elevation
, isd.state
, tmy3.class as tmy3_class
, tmy3.usaf_id is not null as is_tmy3
, cz2010.usaf_id is not null as is_cz2010
from
isd_station_metadata as isd
left join cz2010_station_metadata as cz2010 on
isd.usaf_id = cz2010.usaf_id
left join tmy3_station_metadata as tmy3 on
isd.usaf_id = tmy3.usaf_id
order by
isd.usaf_id
"""
)
df = pd.DataFrame(
[
{col[0]: val for col, val in zip(cur.description, row)}
for row in cur.fetchall()
],
columns=[
"usaf_id",
"latitude",
"longitude",
"iecc_climate_zone",
"iecc_moisture_regime",
"ba_climate_zone",
"ca_climate_zone",
"rough_quality",
"elevation",
"state",
"tmy3_class",
"is_tmy3",
"is_cz2010",
],
).set_index("usaf_id")
df["latitude"] = df.latitude.astype(float)
df["longitude"] = df.longitude.astype(float)
df["elevation"] = df.elevation.astype(float)
df["is_tmy3"] = df.is_tmy3.astype(bool)
df["is_cz2010"] = df.is_cz2010.astype(bool)
return df
cached_data = CachedData()
def _combine_filters(filters, index):
combined_filters = pd.Series(True, index=index)
for f in filters:
combined_filters &= f
return combined_filters
def rank_stations(
site_latitude,
site_longitude,
site_state=None,
site_elevation=None,
match_iecc_climate_zone=False,
match_iecc_moisture_regime=False,
match_ba_climate_zone=False,
match_ca_climate_zone=False,
match_state=False,
minimum_quality=None,
minimum_tmy3_class=None,
max_distance_meters=None,
max_difference_elevation_meters=None,
is_tmy3=None,
is_cz2010=None,
):
""" Get a ranked, filtered set of candidate weather stations and metadata
for a particular site.
Parameters
----------
site_latitude : float
Latitude of target site for which to find candidate weather stations.
site_longitude : float
Longitude of target site for which to find candidate weather stations.
site_state : str, 2 letter abbreviation
US state of target site, used optionally to filter potential candidate
weather stations. Ignored unless ``match_state=True``.
site_elevation : float
Elevation of target site in meters, used optionally to filter potential
candidate weather stations. Ignored unless
``max_difference_elevation_meters`` is set.
match_iecc_climate_zone : bool
If ``True``, filter candidate weather stations to those
matching the IECC climate zone of the target site.
match_iecc_moisture_regime : bool
If ``True``, filter candidate weather stations to those
matching the IECC moisture regime of the target site.
match_ca_climate_zone : bool
If ``True``, filter candidate weather stations to those
matching the CA climate zone of the target site.
match_ba_climate_zone : bool
If ``True``, filter candidate weather stations to those
matching the Building America climate zone of the target site.
match_state : bool
If ``True``, filter candidate weather stations to those
matching the US state of the target site, as specified by
``site_state=True``.
minimum_quality : str, ``'high'``, ``'medium'``, ``'low'``
If given, filter candidate weather stations to those meeting or
exceeding the given quality, as summarized by the frequency and
availability of observations in the NOAA Integrated Surface Database.
minimum_tmy3_class : str, ``'I'``, ``'II'``, ``'III'``
If given, filter candidate weather stations to those meeting or
exceeding the given class, as reported in the NREL TMY3 metadata.
max_distance_meters : float
If given, filter candidate weather stations to those within the
``max_distance_meters`` of the target site location.
max_difference_elevation_meters : float
If given, filter candidate weather stations to those with elevations
within ``max_difference_elevation_meters`` of the target site elevation.
is_tmy3 : bool
If given, filter candidate weather stations to those for which TMY3
normal year temperature data is available.
is_cz2010 : bool
If given, filter candidate weather stations to those for which CZ2010
normal year temperature data is available.
Returns
-------
ranked_filtered_candidates : :any:`pandas.DataFrame`
Index is ``usaf_id``. Each row contains a potential weather station
match and metadata. Contains the following columns:
- ``rank``: Rank of weather station match for the target site.
- ``distance_meters``: Distance from target site to weather station site.
- ``latitude``: Latitude of weather station site.
- ``longitude``: Longitude of weather station site.
- ``iecc_climate_zone``: IECC Climate Zone ID (1-8)
- ``iecc_moisture_regime``: IECC Moisture Regime ID (A-C)
- ``ba_climate_zone``: Building America climate zone name
- ``ca_climate_zone``: Califoria climate zone number
- ``rough_quality``: Approximate measure of frequency of ISD
observations data at weather station.
- ``elevation``: Elevation of weather station site, if available.
- ``state``: US state of weather station site, if applicable.
- ``tmy3_class``: Weather station class as reported by NREL TMY3, if
available
- ``is_tmy3``: Weather station has associated TMY3 data.
- ``is_cz2010``: Weather station has associated CZ2010 data.
- ``difference_elevation_meters``: Absolute difference in meters
between target site elevation and weather station elevation, if
available.
"""
candidates = cached_data.all_station_metadata
# compute distances
candidates_defined_lat_long = candidates[
candidates.latitude.notnull() & candidates.longitude.notnull()
]
candidates_latitude = candidates_defined_lat_long.latitude
candidates_longitude = candidates_defined_lat_long.longitude
tiled_site_latitude = np.tile(site_latitude, candidates_latitude.shape)
tiled_site_longitude = np.tile(site_longitude, candidates_longitude.shape)
geod = pyproj.Geod(ellps="WGS84")
dists = geod.inv(
tiled_site_longitude,
tiled_site_latitude,
candidates_longitude.values,
candidates_latitude.values,
)[2]
distance_meters = pd.Series(dists, index=candidates_defined_lat_long.index).reindex(
candidates.index
)
candidates["distance_meters"] = distance_meters
if site_elevation is not None:
difference_elevation_meters = (candidates.elevation - site_elevation).abs()
else:
difference_elevation_meters = None
candidates["difference_elevation_meters"] = difference_elevation_meters
site_climate_zones = get_lat_long_climate_zones(site_latitude, site_longitude)
site_iecc_climate_zone = site_climate_zones["iecc_climate_zone"]
site_iecc_moisture_regime = site_climate_zones["iecc_moisture_regime"]
site_ca_climate_zone = site_climate_zones["ca_climate_zone"]
site_ba_climate_zone = site_climate_zones["ba_climate_zone"]
# create filters
filters = []
if match_iecc_climate_zone:
if site_iecc_climate_zone is None:
filters.append(candidates.iecc_climate_zone.isnull())
else:
filters.append(candidates.iecc_climate_zone == site_iecc_climate_zone)
if match_iecc_moisture_regime:
if site_iecc_moisture_regime is None:
filters.append(candidates.iecc_moisture_regime.isnull())
else:
filters.append(candidates.iecc_moisture_regime == site_iecc_moisture_regime)
if match_ba_climate_zone:
if site_ba_climate_zone is None:
filters.append(candidates.ba_climate_zone.isnull())
else:
filters.append(candidates.ba_climate_zone == site_ba_climate_zone)
if match_ca_climate_zone:
if site_ca_climate_zone is None:
filters.append(candidates.ca_climate_zone.isnull())
else:
filters.append(candidates.ca_climate_zone == site_ca_climate_zone)
if match_state:
if site_state is None:
filters.append(candidates.state.isnull())
else:
filters.append(candidates.state == site_state)
if is_tmy3 is not None:
filters.append(candidates.is_tmy3.isin([is_tmy3]))
if is_cz2010 is not None:
filters.append(candidates.is_cz2010.isin([is_cz2010]))
if minimum_quality == "low":
filters.append(candidates.rough_quality.isin(["high", "medium", "low"]))
elif minimum_quality == "medium":
filters.append(candidates.rough_quality.isin(["high", "medium"]))
elif minimum_quality == "high":
filters.append(candidates.rough_quality.isin(["high"]))
if minimum_tmy3_class == "III":
filters.append(candidates.tmy3_class.isin(["I", "II", "III"]))
elif minimum_tmy3_class == "II":
filters.append(candidates.tmy3_class.isin(["I", "II"]))
elif minimum_tmy3_class == "I":
filters.append(candidates.tmy3_class.isin(["I"]))
if max_distance_meters is not None:
filters.append(candidates.distance_meters <= max_distance_meters)
if max_difference_elevation_meters is not None and site_elevation is not None:
filters.append(
candidates.difference_elevation_meters <= max_difference_elevation_meters
)
combined_filters = _combine_filters(filters, candidates.index)
filtered_candidates = candidates[combined_filters]
ranked_filtered_candidates = filtered_candidates.sort_values(by=["distance_meters"])
# add rank column
ranks = range(1, 1 + len(ranked_filtered_candidates))
ranked_filtered_candidates.insert(0, "rank", ranks)
return ranked_filtered_candidates[
[
"rank",
"distance_meters",
"latitude",
"longitude",
"iecc_climate_zone",
"iecc_moisture_regime",
"ba_climate_zone",
"ca_climate_zone",
"rough_quality",
"elevation",
"state",
"tmy3_class",
"is_tmy3",
"is_cz2010",
"difference_elevation_meters",
]
]
@eeweather.mockable.mockable()
def load_isd_hourly_temp_data(station, start_date, end_date): # pragma: no cover
return station.load_isd_hourly_temp_data(start_date, end_date)
def select_station(
candidates,
coverage_range=None,
min_fraction_coverage=0.9,
distance_warnings=(50000, 200000),
rank=1,
):
""" Select a station from a list of candidates that meets given data
quality criteria.
Parameters
----------
candidates : :any:`pandas.DataFrame`
A dataframe of the form given by :any:`eeweather.rank_stations` or
:any:`eeweather.combine_ranked_stations`, specifically having at least
an index with ``usaf_id`` values and the column ``distance_meters``.
Returns
-------
isd_station, warnings : tuple of (:any:`eeweather.ISDStation`, list of str)
A qualified weather station. ``None`` if no station meets criteria.
"""
def _test_station(station):
if coverage_range is None:
return True, []
else:
start_date, end_date = coverage_range
try:
tempC, warnings = eeweather.mockable.load_isd_hourly_temp_data(
station, start_date, end_date
)
except ISDDataNotAvailableError:
return False, [] # reject
# TODO(philngo): also need to incorporate within-day limits
if len(tempC) > 0:
fraction_coverage = tempC.notnull().sum() / float(len(tempC))
return (fraction_coverage > min_fraction_coverage), warnings
else:
return False, [] # reject
def _station_warnings(station, distance_meters):
return [
EEWeatherWarning(
qualified_name="eeweather.exceeds_maximum_distance",
description=(
"Distance from target to weather station is greater"
"than the specified km."
),
data={
"distance_meters": distance_meters,
"max_distance_meters": d,
"rank": rank,
},
)
for d in distance_warnings
if distance_meters > d
]
n_stations_passed = 0
for usaf_id, row in candidates.iterrows():
station = ISDStation(usaf_id)
test_result, warnings = _test_station(station)
if test_result:
n_stations_passed += 1
if n_stations_passed == rank:
if not warnings:
warnings = []
warnings.extend(_station_warnings(station, row.distance_meters))
return station, warnings
no_station_warning = EEWeatherWarning(
qualified_name="eeweather.no_weather_station_selected",
description=(
"No weather station found with the specified rank and"
" minimum fracitional coverage."
),
data={"rank": rank, "min_fraction_coverage": min_fraction_coverage},
)
return None, [no_station_warning]
|
def select_station(
    candidates,
    coverage_range=None,
    min_fraction_coverage=0.9,
    distance_warnings=(50000, 200000),
    rank=1,
):
    """ Select a station from a list of candidates that meets given data
    quality criteria.

    Parameters
    ----------
    candidates : :any:`pandas.DataFrame`
        A dataframe of the form given by :any:`eeweather.rank_stations` or
        :any:`eeweather.combine_ranked_stations`, specifically having at least
        an index with ``usaf_id`` values and the column ``distance_meters``.
    coverage_range : tuple of (start, end), optional
        If given, stations are rejected unless their hourly temperature data
        over this period is at least ``min_fraction_coverage`` non-null.
    min_fraction_coverage : float, optional
        Minimum fraction of non-null hourly values over ``coverage_range``.
    distance_warnings : tuple of float, optional
        Distances in meters; one warning is attached per threshold exceeded
        by the selected station.
    rank : int, optional
        Return the nth (1-indexed) station that passes the quality test.

    Returns
    -------
    isd_station, warnings : tuple of (:any:`eeweather.ISDStation`, list of str)
        A qualified weather station. ``None`` if no station meets criteria.
    """

    def _test_station(station):
        # Without a coverage requirement, every candidate qualifies.
        if coverage_range is None:
            return True, []
        else:
            start_date, end_date = coverage_range
            try:
                tempC, warnings = eeweather.mockable.load_isd_hourly_temp_data(
                    station, start_date, end_date
                )
            except ISDDataNotAvailableError:
                return False, []  # reject
            # TODO(philngo): also need to incorporate within-day limits
            if len(tempC) > 0:
                fraction_coverage = tempC.notnull().sum() / float(len(tempC))
                return (fraction_coverage > min_fraction_coverage), warnings
            else:
                return False, []  # reject

    def _station_warnings(station, distance_meters):
        # One warning per distance threshold the station exceeds.
        return [
            EEWeatherWarning(
                qualified_name="eeweather.exceeds_maximum_distance",
                description=(
                    # BUGFIX: adjacent literals previously concatenated to
                    # "greaterthan" — a separating space was missing.
                    "Distance from target to weather station is greater"
                    " than the specified km."
                ),
                data={
                    "distance_meters": distance_meters,
                    "max_distance_meters": d,
                    "rank": rank,
                },
            )
            for d in distance_warnings
            if distance_meters > d
        ]

    n_stations_passed = 0
    for usaf_id, row in candidates.iterrows():
        station = ISDStation(usaf_id)
        test_result, warnings = _test_station(station)
        if test_result:
            n_stations_passed += 1
            if n_stations_passed == rank:
                if not warnings:
                    warnings = []
                warnings.extend(_station_warnings(station, row.distance_meters))
                return station, warnings

    no_station_warning = EEWeatherWarning(
        qualified_name="eeweather.no_weather_station_selected",
        description=(
            "No weather station found with the specified rank and"
            # BUGFIX: typo "fracitional" -> "fractional"
            " minimum fractional coverage."
        ),
        data={"rank": rank, "min_fraction_coverage": min_fraction_coverage},
    )
    return None, [no_station_warning]
"def _test_station(station):\n if coverage_range is None:\n return True, []\n else:\n start_date, end_date = coverage_range\n try:\n tempC, warnings = eeweather.mockable.load_isd_hourly_temp_data(\n station, start_date, end_date\n )\n except ISD... | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2018 Open Energy Efficiency, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pandas as pd
import numpy as np
import pyproj
import eeweather.mockable
from .exceptions import ISDDataNotAvailableError
from .connections import metadata_db_connection_proxy
from .geo import get_lat_long_climate_zones
from .stations import ISDStation
from .utils import lazy_property
from .warnings import EEWeatherWarning
__all__ = ("rank_stations", "combine_ranked_stations", "select_station")
class CachedData(object):
    """Lazily-loaded, process-wide cache of weather-station metadata.

    A single module-level instance (``cached_data``, below) is shared so that
    the metadata database is queried at most once per process.
    """

    @lazy_property
    def all_station_metadata(self):
        # Computed once on first access (lazy_property) and cached on the
        # instance. NOTE(review): callers receive the cached DataFrame
        # itself, not a copy — mutating it mutates the shared cache.
        conn = metadata_db_connection_proxy.get_connection()
        cur = conn.cursor()
        # Left joins flag whether TMY3 / CZ2010 normal-year data exists for
        # each ISD station.
        cur.execute(
            """
          select
            isd.usaf_id
            , isd.latitude
            , isd.longitude
            , isd.iecc_climate_zone
            , isd.iecc_moisture_regime
            , isd.ba_climate_zone
            , isd.ca_climate_zone
            , isd.quality as rough_quality
            , isd.elevation
            , isd.state
            , tmy3.class as tmy3_class
            , tmy3.usaf_id is not null as is_tmy3
            , cz2010.usaf_id is not null as is_cz2010
          from
            isd_station_metadata as isd
            left join cz2010_station_metadata as cz2010 on
              isd.usaf_id = cz2010.usaf_id
            left join tmy3_station_metadata as tmy3 on
              isd.usaf_id = tmy3.usaf_id
          order by
            isd.usaf_id
        """
        )
        # Build the DataFrame from raw rows, taking column names from the
        # cursor description; explicit `columns=` keeps order/shape stable
        # even if no rows are returned.
        df = pd.DataFrame(
            [
                {col[0]: val for col, val in zip(cur.description, row)}
                for row in cur.fetchall()
            ],
            columns=[
                "usaf_id",
                "latitude",
                "longitude",
                "iecc_climate_zone",
                "iecc_moisture_regime",
                "ba_climate_zone",
                "ca_climate_zone",
                "rough_quality",
                "elevation",
                "state",
                "tmy3_class",
                "is_tmy3",
                "is_cz2010",
            ],
        ).set_index("usaf_id")
        # The DB stores values as text/ints; normalize dtypes for downstream
        # arithmetic and boolean filtering.
        df["latitude"] = df.latitude.astype(float)
        df["longitude"] = df.longitude.astype(float)
        df["elevation"] = df.elevation.astype(float)
        df["is_tmy3"] = df.is_tmy3.astype(bool)
        df["is_cz2010"] = df.is_cz2010.astype(bool)
        return df


# Shared singleton; see CachedData docstring.
cached_data = CachedData()
def _combine_filters(filters, index):
    """AND together boolean filter Series aligned on ``index``.

    With no filters, every entry of the result is ``True``.
    """
    result = pd.Series(True, index=index)
    for filter_series in filters:
        result = result & filter_series
    return result
def rank_stations(
    site_latitude,
    site_longitude,
    site_state=None,
    site_elevation=None,
    match_iecc_climate_zone=False,
    match_iecc_moisture_regime=False,
    match_ba_climate_zone=False,
    match_ca_climate_zone=False,
    match_state=False,
    minimum_quality=None,
    minimum_tmy3_class=None,
    max_distance_meters=None,
    max_difference_elevation_meters=None,
    is_tmy3=None,
    is_cz2010=None,
):
    """ Get a ranked, filtered set of candidate weather stations and metadata
    for a particular site.

    Parameters
    ----------
    site_latitude : float
        Latitude of target site for which to find candidate weather stations.
    site_longitude : float
        Longitude of target site for which to find candidate weather stations.
    site_state : str, 2 letter abbreviation
        US state of target site, used optionally to filter potential candidate
        weather stations. Ignored unless ``match_state=True``.
    site_elevation : float
        Elevation of target site in meters, used optionally to filter potential
        candidate weather stations. Ignored unless
        ``max_difference_elevation_meters`` is set.
    match_iecc_climate_zone : bool
        If ``True``, filter candidate weather stations to those
        matching the IECC climate zone of the target site.
    match_iecc_moisture_regime : bool
        If ``True``, filter candidate weather stations to those
        matching the IECC moisture regime of the target site.
    match_ca_climate_zone : bool
        If ``True``, filter candidate weather stations to those
        matching the CA climate zone of the target site.
    match_ba_climate_zone : bool
        If ``True``, filter candidate weather stations to those
        matching the Building America climate zone of the target site.
    match_state : bool
        If ``True``, filter candidate weather stations to those
        matching the US state of the target site, as specified by
        ``site_state=True``.
    minimum_quality : str, ``'high'``, ``'medium'``, ``'low'``
        If given, filter candidate weather stations to those meeting or
        exceeding the given quality, as summarized by the frequency and
        availability of observations in the NOAA Integrated Surface Database.
    minimum_tmy3_class : str, ``'I'``, ``'II'``, ``'III'``
        If given, filter candidate weather stations to those meeting or
        exceeding the given class, as reported in the NREL TMY3 metadata.
    max_distance_meters : float
        If given, filter candidate weather stations to those within the
        ``max_distance_meters`` of the target site location.
    max_difference_elevation_meters : float
        If given, filter candidate weather stations to those with elevations
        within ``max_difference_elevation_meters`` of the target site elevation.
    is_tmy3 : bool
        If given, filter candidate weather stations to those for which TMY3
        normal year temperature data is available.
    is_cz2010 : bool
        If given, filter candidate weather stations to those for which CZ2010
        normal year temperature data is available.

    Returns
    -------
    ranked_filtered_candidates : :any:`pandas.DataFrame`
        Index is ``usaf_id``. Each row contains a potential weather station
        match and metadata. Contains the following columns:

        - ``rank``: Rank of weather station match for the target site.
        - ``distance_meters``: Distance from target site to weather station site.
        - ``latitude``: Latitude of weather station site.
        - ``longitude``: Longitude of weather station site.
        - ``iecc_climate_zone``: IECC Climate Zone ID (1-8)
        - ``iecc_moisture_regime``: IECC Moisture Regime ID (A-C)
        - ``ba_climate_zone``: Building America climate zone name
        - ``ca_climate_zone``: California climate zone number
        - ``rough_quality``: Approximate measure of frequency of ISD
          observations data at weather station.
        - ``elevation``: Elevation of weather station site, if available.
        - ``state``: US state of weather station site, if applicable.
        - ``tmy3_class``: Weather station class as reported by NREL TMY3, if
          available
        - ``is_tmy3``: Weather station has associated TMY3 data.
        - ``is_cz2010``: Weather station has associated CZ2010 data.
        - ``difference_elevation_meters``: Absolute difference in meters
          between target site elevation and weather station elevation, if
          available.
    """
    # BUGFIX: take a copy so the column assignments below (distance_meters,
    # difference_elevation_meters) do not mutate the shared cached DataFrame.
    candidates = cached_data.all_station_metadata.copy()

    # compute distances (only for stations with defined lat/long); the
    # Series is reindexed back onto the full candidate set so stations with
    # unknown location get NaN distances.
    candidates_defined_lat_long = candidates[
        candidates.latitude.notnull() & candidates.longitude.notnull()
    ]
    candidates_latitude = candidates_defined_lat_long.latitude
    candidates_longitude = candidates_defined_lat_long.longitude
    tiled_site_latitude = np.tile(site_latitude, candidates_latitude.shape)
    tiled_site_longitude = np.tile(site_longitude, candidates_longitude.shape)
    geod = pyproj.Geod(ellps="WGS84")
    # geod.inv returns (fwd azimuth, back azimuth, distance); take distance
    dists = geod.inv(
        tiled_site_longitude,
        tiled_site_latitude,
        candidates_longitude.values,
        candidates_latitude.values,
    )[2]
    distance_meters = pd.Series(dists, index=candidates_defined_lat_long.index).reindex(
        candidates.index
    )
    candidates["distance_meters"] = distance_meters

    if site_elevation is not None:
        difference_elevation_meters = (candidates.elevation - site_elevation).abs()
    else:
        difference_elevation_meters = None
    candidates["difference_elevation_meters"] = difference_elevation_meters

    site_climate_zones = get_lat_long_climate_zones(site_latitude, site_longitude)
    site_iecc_climate_zone = site_climate_zones["iecc_climate_zone"]
    site_iecc_moisture_regime = site_climate_zones["iecc_moisture_regime"]
    site_ca_climate_zone = site_climate_zones["ca_climate_zone"]
    site_ba_climate_zone = site_climate_zones["ba_climate_zone"]

    # create filters; when the site's zone is unknown (None) the "match"
    # filters select stations whose zone is also unknown.
    filters = []

    if match_iecc_climate_zone:
        if site_iecc_climate_zone is None:
            filters.append(candidates.iecc_climate_zone.isnull())
        else:
            filters.append(candidates.iecc_climate_zone == site_iecc_climate_zone)

    if match_iecc_moisture_regime:
        if site_iecc_moisture_regime is None:
            filters.append(candidates.iecc_moisture_regime.isnull())
        else:
            filters.append(candidates.iecc_moisture_regime == site_iecc_moisture_regime)

    if match_ba_climate_zone:
        if site_ba_climate_zone is None:
            filters.append(candidates.ba_climate_zone.isnull())
        else:
            filters.append(candidates.ba_climate_zone == site_ba_climate_zone)

    if match_ca_climate_zone:
        if site_ca_climate_zone is None:
            filters.append(candidates.ca_climate_zone.isnull())
        else:
            filters.append(candidates.ca_climate_zone == site_ca_climate_zone)

    if match_state:
        if site_state is None:
            filters.append(candidates.state.isnull())
        else:
            filters.append(candidates.state == site_state)

    if is_tmy3 is not None:
        filters.append(candidates.is_tmy3.isin([is_tmy3]))

    if is_cz2010 is not None:
        filters.append(candidates.is_cz2010.isin([is_cz2010]))

    # "minimum" filters accept the given level and anything better
    if minimum_quality == "low":
        filters.append(candidates.rough_quality.isin(["high", "medium", "low"]))
    elif minimum_quality == "medium":
        filters.append(candidates.rough_quality.isin(["high", "medium"]))
    elif minimum_quality == "high":
        filters.append(candidates.rough_quality.isin(["high"]))

    if minimum_tmy3_class == "III":
        filters.append(candidates.tmy3_class.isin(["I", "II", "III"]))
    elif minimum_tmy3_class == "II":
        filters.append(candidates.tmy3_class.isin(["I", "II"]))
    elif minimum_tmy3_class == "I":
        filters.append(candidates.tmy3_class.isin(["I"]))

    if max_distance_meters is not None:
        filters.append(candidates.distance_meters <= max_distance_meters)

    if max_difference_elevation_meters is not None and site_elevation is not None:
        filters.append(
            candidates.difference_elevation_meters <= max_difference_elevation_meters
        )

    combined_filters = _combine_filters(filters, candidates.index)
    filtered_candidates = candidates[combined_filters]
    ranked_filtered_candidates = filtered_candidates.sort_values(by=["distance_meters"])

    # add rank column (1 = closest passing station)
    ranks = range(1, 1 + len(ranked_filtered_candidates))
    ranked_filtered_candidates.insert(0, "rank", ranks)

    return ranked_filtered_candidates[
        [
            "rank",
            "distance_meters",
            "latitude",
            "longitude",
            "iecc_climate_zone",
            "iecc_moisture_regime",
            "ba_climate_zone",
            "ca_climate_zone",
            "rough_quality",
            "elevation",
            "state",
            "tmy3_class",
            "is_tmy3",
            "is_cz2010",
            "difference_elevation_meters",
        ]
    ]
def combine_ranked_stations(rankings):
    """ Combine :any:`pandas.DataFrame` s of candidate weather stations to form
    a hybrid ranking dataframe.

    Parameters
    ----------
    rankings : list of :any:`pandas.DataFrame`
        Dataframes of ranked weather station candidates and metadata.
        All ranking dataframes should have the same columns and must be
        sorted by rank.

    Returns
    -------
    ranked_filtered_candidates : :any:`pandas.DataFrame`
        Dataframe has a rank column and the same columns given in the source
        dataframes.
    """
    if len(rankings) == 0:
        raise ValueError("Requires at least one ranking.")

    # Start from the first ranking; append only stations not already seen,
    # preserving the order of the earlier rankings.
    combined = rankings[0]
    for ranking in rankings[1:]:
        unseen = ranking[~ranking.index.isin(combined.index)]
        combined = pd.concat([combined, unseen])

    # Re-rank the combined result from 1..n.
    combined["rank"] = range(1, 1 + len(combined))
    return combined
@eeweather.mockable.mockable()
def load_isd_hourly_temp_data(station, start_date, end_date):  # pragma: no cover
    # Thin, mockable indirection over ISDStation.load_isd_hourly_temp_data so
    # tests can patch out network/file access (see eeweather.mockable).
    return station.load_isd_hourly_temp_data(start_date, end_date)
|
def _load_isd_station_metadata(download_path):
    """Collect metadata for US ISD stations.

    Reads ``isd-history.csv`` from *download_path* and returns a dict keyed
    by USAF id with the most recently active WBAN row's name, location,
    elevation, and state, restricted to geolocated US stations (including
    AQ/GQ/RQ/VQ territories) with a real (non-999999) USAF id.
    """
    from shapely.geometry import Point

    history = pd.read_csv(
        os.path.join(download_path, "isd-history.csv"),
        dtype=str,
        parse_dates=["BEGIN", "END"],
    )

    # NOTE(review): columns are read as str, so `LAT != 0` compares string to
    # int — presumably always True; confirm against source data format.
    has_geo = history.LAT.notnull() & history.LON.notnull() & (history.LAT != 0)
    is_us_state = (history.CTRY == "US") & history.STATE.notnull()
    # AQ = American Samoa, GQ = Guam, RQ = Puerto Rico, VQ = Virgin Islands
    is_us_territory = history.CTRY.str[1] == "Q"
    has_usaf = history.USAF != "999999"
    keep = has_geo & (is_us_state | is_us_territory) & has_usaf

    metadata = {}
    for usaf_id, group in history[keep].groupby("USAF"):
        # one entry per USAF id, based on the most recently active WBAN row
        recent = group.loc[group.END.idxmax()]
        elevation = recent["ELEV(M)"]
        if str(float(elevation)).startswith("-999"):
            elevation = None  # -999* is the missing-value sentinel
        metadata[usaf_id] = {
            "usaf_id": usaf_id,
            "wban_ids": list(group.WBAN),
            "recent_wban_id": recent.WBAN,
            "name": recent["STATION NAME"],
            "icao_code": recent.ICAO,
            # zeroed-out coordinates are treated as missing
            "latitude": None if recent.LAT in ("+00.000",) else recent.LAT,
            "longitude": None if recent.LON in ("+000.000",) else recent.LON,
            "point": Point(float(recent.LON), float(recent.LAT)),
            "elevation": elevation,
            "state": recent.STATE,
        }
    return metadata
# -*- coding: utf-8 -*-
"""
Copyright 2018 Open Energy Efficiency, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import defaultdict
from datetime import datetime, timedelta
import json
import logging
import os
import shutil
import subprocess
import tempfile
import pandas as pd
import numpy as np
from .connections import noaa_ftp_connection_proxy, metadata_db_connection_proxy
logger = logging.getLogger(__name__)
__all__ = ("build_metadata_db", "inspect_metadata_db")
CZ2010_LIST = [
"725958",
"725945",
"723840",
"724837",
"724800",
"725845",
"747188",
"722880",
"723926",
"722926",
"722927",
"746120",
"722899",
"724936",
"725946",
"723815",
"723810",
"722810",
"725940",
"723890",
"722976",
"724935",
"747185",
"722909",
"723826",
"722956",
"725847",
"723816",
"747020",
"724927",
"722895",
"722970",
"722975",
"722874",
"722950",
"724815",
"724926",
"722953",
"725955",
"724915",
"725957",
"724955",
"723805",
"724930",
"723927",
"722868",
"747187",
"723820",
"724937",
"723965",
"723910",
"723895",
"725910",
"725920",
"722860",
"722869",
"724830",
"724839",
"724917",
"724938",
"722925",
"722907",
"722900",
"722903",
"722906",
"724940",
"724945",
"724946",
"722897",
"722910",
"723830",
"722977",
"723925",
"723940",
"722885",
"724957",
"724920",
"722955",
"745160",
"725846",
"690150",
"725905",
"722886",
"723930",
"723896",
"724838",
]
class PrettyFloat(float):
    """A float whose repr is limited to seven significant digits.

    Used to keep serialized GeoJSON coordinate output compact.
    """

    def __repr__(self):
        return "%.7g" % self
def pretty_floats(obj):
    """Recursively prepare *obj* for compact serialization.

    Floats are rounded to 4 places and wrapped as PrettyFloat; dicts are
    rebuilt with converted values; lists and tuples both become lists of
    converted items; anything else passes through unchanged.
    """
    if isinstance(obj, float):
        return PrettyFloat(round(obj, 4))
    if isinstance(obj, dict):
        return {key: pretty_floats(value) for key, value in obj.items()}
    if isinstance(obj, (list, tuple)):
        return [pretty_floats(item) for item in obj]
    return obj
def to_geojson(polygon):
    # Serialize a shapely geometry as compact GeoJSON text with coordinates
    # rounded to 4 decimal places (via pretty_floats / PrettyFloat).
    import simplejson
    from shapely.geometry import mapping

    return simplejson.dumps(pretty_floats(mapping(polygon)), separators=(",", ":"))
def _download_primary_sources():
    """Run the primary-source download script into a fresh temp directory.

    Returns the temp directory path; the caller is responsible for cleanup.
    """
    this_file = os.path.abspath(__file__)
    repo_root = os.path.dirname(os.path.dirname(this_file))
    script = os.path.join(repo_root, "scripts", "download_primary_sources.sh")
    target_dir = tempfile.mkdtemp()
    subprocess.call([script, target_dir])
    return target_dir
def _load_isd_file_metadata(download_path, isd_station_metadata):
    """ Collect data counts for isd files.

    Reads ``isd-inventory.csv`` from *download_path*, keeps only rows whose
    USAF id appears in *isd_station_metadata* and whose year is after 2005,
    and returns ``{usaf_id: {"usaf_id": ..., "years": {year: [records]}}}``
    where each record holds a WBAN id and its twelve monthly count strings.
    """
    inventory = pd.read_csv(
        os.path.join(download_path, "isd-inventory.csv"), dtype=str
    )

    # restrict to stations with metadata
    inventory = inventory[[usaf in isd_station_metadata for usaf in inventory.USAF]]
    # restrict to recent years (string comparison is safe for 4-digit years)
    inventory = inventory[inventory.YEAR > "2005"]

    month_columns = [
        "JAN", "FEB", "MAR", "APR", "MAY", "JUN",
        "JUL", "AUG", "SEP", "OCT", "NOV", "DEC",
    ]
    metadata = {}
    for (usaf_id, year), group in inventory.groupby(["USAF", "YEAR"]):
        station_entry = metadata.setdefault(
            usaf_id, {"usaf_id": usaf_id, "years": {}}
        )
        station_entry["years"][year] = [
            {
                "wban_id": row.WBAN,
                "counts": [row[month] for month in month_columns],
            }
            for _, row in group.iterrows()
        ]
    return metadata
def _compute_isd_station_quality(
    isd_station_metadata,
    isd_file_metadata,
    end_year=None,
    years_back=None,
    quality_func=None,
):
    """Annotate every station's metadata dict with a ``quality`` label.

    Quality is computed by *quality_func* from the per-month observation
    counts over the ``years_back`` years ending at ``end_year`` (defaults:
    last full calendar year, 5 years). Stations missing any year in that
    window are scored as if they had no data at all.
    """
    if end_year is None:
        end_year = datetime.now().year - 1  # last full year
    if years_back is None:
        years_back = 5
    if quality_func is None:
        # default thresholds: min month > 25 obs/day avg -> high, > 15 -> medium
        def quality_func(values):
            minimum = values.min()
            if minimum > 24 * 25:
                return "high"
            elif minimum > 24 * 15:
                return "medium"
            else:
                return "low"

    # e.g., if end_year == 2017, year_range = {"2013", "2014", ..., "2017"}
    year_range = set(str(y) for y in range(end_year - (years_back - 1), end_year + 1))

    def _station_quality(usaf_id):
        years_data = isd_file_metadata.get(usaf_id, {}).get("years", {})
        if not all(year in years_data for year in year_range):
            # any missing year disqualifies: score an all-zero count vector
            return quality_func(np.repeat(0, 60))
        # sum monthly counts across WBAN records, slot-keyed by (year, month)
        counts = defaultdict(lambda: 0)
        for y, year in enumerate(year_range):
            for station in years_data[year]:
                for m, month_count in enumerate(station["counts"]):
                    counts[y * 12 + m] += int(month_count)
        return quality_func(np.array(list(counts.values())))

    for usaf_id, metadata in isd_station_metadata.items():
        metadata["quality"] = _station_quality(usaf_id)
def _load_zcta_metadata(download_path):
    """Build ZCTA (ZIP Code Tabulation Area) metadata from downloaded files.

    Reads the Census ZCTA geojson and a ZIP-prefix-to-state mapping from
    *download_path*; returns ``{zcta: {...}}`` with polygon, compact GeoJSON
    text, centroid lat/long, and (best-effort) US state.
    """
    from shapely.geometry import shape

    # load zcta geojson
    geojson_path = os.path.join(download_path, "cb_2016_us_zcta510_500k.json")
    with open(geojson_path, "r") as f:
        geojson = json.load(f)

    # load ZIP code prefixes by state
    zipcode_prefixes_path = os.path.join(download_path, "zipcode_prefixes.json")
    with open(zipcode_prefixes_path, "r") as f:
        zipcode_prefixes = json.load(f)

    # invert {state: [prefixes]} into {prefix: state} for O(1) lookup
    prefix_to_zipcode = {
        zipcode_prefix: state
        for state, zipcode_prefix_list in zipcode_prefixes.items()
        for zipcode_prefix in zipcode_prefix_list
    }

    def _get_state(zcta):
        # first three ZIP digits identify the state; None for unknown prefix
        prefix = zcta[:3]
        return prefix_to_zipcode.get(prefix)

    metadata = {}
    for feature in geojson["features"]:
        zcta = feature["properties"]["GEOID10"]
        geometry = feature["geometry"]
        polygon = shape(geometry)
        centroid = polygon.centroid
        state = _get_state(zcta)
        metadata[zcta] = {
            "zcta": zcta,
            "polygon": polygon,
            "geometry": to_geojson(polygon),
            "centroid": centroid,
            # GeoJSON coordinates are (lon, lat)
            "latitude": centroid.coords[0][1],
            "longitude": centroid.coords[0][0],
            "state": state,
        }
    return metadata
def _load_county_metadata(download_path):
    """Build US county metadata (geometry + climate zones) from downloads.

    Reads the Census county geojson and the DOE climate-zones CSV from
    *download_path*; returns ``{county_fips: {...}}`` with polygon, compact
    GeoJSON text, centroid, and — where available — IECC/BA climate zone
    assignments.
    """
    from shapely.geometry import shape

    # load county geojson
    geojson_path = os.path.join(download_path, "cb_2016_us_county_500k.json")
    with open(geojson_path, "r") as f:
        geojson = json.load(f)

    metadata = {}
    for feature in geojson["features"]:
        county = feature["properties"]["GEOID"]
        geometry = feature["geometry"]
        polygon = shape(geometry)
        centroid = polygon.centroid
        metadata[county] = {
            "county": county,
            "polygon": polygon,
            "geometry": to_geojson(polygon),
            "centroid": centroid,
            # GeoJSON coordinates are (lon, lat)
            "latitude": centroid.coords[0][1],
            "longitude": centroid.coords[0][0],
        }

    # load county climate zones and merge into the geometry records
    county_climate_zones = pd.read_csv(
        os.path.join(download_path, "climate_zones.csv"),
        dtype=str,
        usecols=[
            "State FIPS",
            "County FIPS",
            "IECC Climate Zone",
            "IECC Moisture Regime",
            "BA Climate Zone",
            "County Name",
        ],
    )
    for i, row in county_climate_zones.iterrows():
        county = row["State FIPS"] + row["County FIPS"]
        if county not in metadata:
            # FIX: logger.warn is deprecated; use logger.warning
            logger.warning(
                "Could not find geometry for county {}, skipping.".format(county)
            )
            continue
        metadata[county].update(
            {
                "name": row["County Name"],
                "iecc_climate_zone": row["IECC Climate Zone"],
                "iecc_moisture_regime": (
                    row["IECC Moisture Regime"]
                    if not pd.isnull(row["IECC Moisture Regime"])
                    else None
                ),
                "ba_climate_zone": row["BA Climate Zone"],
            }
        )
    return metadata
def _load_CA_climate_zone_metadata(download_path):
    """Build California Building Standards climate-zone metadata.

    Reads the CA climate-zone geojson from *download_path*; returns
    ``{zone_number: {...}}`` (zero-padded two-digit keys) with the canonical
    ``CA_NN`` id, reference-city name, polygon, and compact GeoJSON text.
    """
    # FIX: `mapping` was imported but never used here (to_geojson handles the
    # shapely->GeoJSON mapping internally).
    from shapely.geometry import shape

    # reference city for each CA climate zone
    ca_climate_zone_names = {
        "01": "Arcata",
        "02": "Santa Rosa",
        "03": "Oakland",
        "04": "San Jose-Reid",
        "05": "Santa Maria",
        "06": "Torrance",
        "07": "San Diego-Lindbergh",
        "08": "Fullerton",
        "09": "Burbank-Glendale",
        "10": "Riverside",
        "11": "Red Bluff",
        "12": "Sacramento",
        "13": "Fresno",
        "14": "Palmdale",
        "15": "Palm Spring-Intl",
        "16": "Blue Canyon",
    }

    geojson_path = os.path.join(
        download_path, "CA_Building_Standards_Climate_Zones.json"
    )
    with open(geojson_path, "r") as f:
        geojson = json.load(f)

    metadata = {}
    for feature in geojson["features"]:
        # normalize zone ids to zero-padded two-digit strings
        zone = "{:02d}".format(int(feature["properties"]["Zone"]))
        geometry = feature["geometry"]
        polygon = shape(geometry)
        metadata[zone] = {
            "ca_climate_zone": "CA_{}".format(zone),
            "name": ca_climate_zone_names[zone],
            "polygon": polygon,
            "geometry": to_geojson(polygon),
        }
    return metadata
def _load_tmy3_station_metadata(download_path):
    """Scrape TMY3 station metadata from the saved NREL station-list page.

    Parses ``tmy3-stations.html`` in *download_path* and returns
    ``{usaf_id: {"usaf_id", "name", "state", "class"}}``.
    NOTE(review): this depends on the exact table layout of the archived
    page (`td .hide` cells followed by name and class cells) — verify if the
    snapshot is ever refreshed.
    """
    from bs4 import BeautifulSoup

    path = os.path.join(download_path, "tmy3-stations.html")
    with open(path, "r") as f:
        soup = BeautifulSoup(f.read(), "html.parser")

    tmy3_station_elements = soup.select("td .hide")
    metadata = {}
    for station_el in tmy3_station_elements:
        # sibling cells: usaf id -> (skip) -> "Name, ST" -> "Class N"
        station_name_el = station_el.findNext("td").findNext("td")
        station_class_el = station_name_el.findNext("td")
        usaf_id = station_el.text.strip()
        # name cell is "City Name, ST"; drop the trailing state and
        # normalize embedded whitespace
        name = (
            "".join(station_name_el.text.split(",")[:-1])
            .replace("\n", "")
            .replace("\t", "")
            .strip()
        )
        metadata[usaf_id] = {
            "usaf_id": usaf_id,
            "name": name,
            "state": station_name_el.text.split(",")[-1].strip(),
            "class": station_class_el.text.split()[1].strip(),
        }
    return metadata
def _load_cz2010_station_metadata():
    """Build minimal metadata entries for the fixed list of CZ2010 stations."""
    metadata = {}
    for station_id in CZ2010_LIST:
        metadata[station_id] = {"usaf_id": station_id}
    return metadata
def _create_merged_climate_zones_metadata(county_metadata):
    """Merge county polygons into one simplified polygon per climate zone.

    Returns a tuple of three dicts keyed by IECC climate zone, IECC moisture
    regime, and Building America climate zone respectively; each value holds
    the zone id, its merged shapely polygon, and compact GeoJSON text.
    (Refactored: the three identical union/simplify loops now share one
    helper.)
    """
    from shapely.ops import cascaded_union

    def _merge_zones(polygons_by_zone, field):
        # union each zone's county polygons and simplify for compact GeoJSON
        merged = {}
        for zone, polygons in polygons_by_zone.items():
            polygon = cascaded_union(polygons)
            polygon = polygon.simplify(0.01)
            merged[zone] = {
                field: zone,
                "polygon": polygon,
                "geometry": to_geojson(polygon),
            }
        return merged

    # group county polygons by each of the three climate-zone fields,
    # skipping counties with no assignment for a given field
    iecc_climate_zone_polygons = defaultdict(list)
    iecc_moisture_regime_polygons = defaultdict(list)
    ba_climate_zone_polygons = defaultdict(list)
    for county in county_metadata.values():
        polygon = county["polygon"]
        for polygons_by_zone, field in (
            (iecc_climate_zone_polygons, "iecc_climate_zone"),
            (iecc_moisture_regime_polygons, "iecc_moisture_regime"),
            (ba_climate_zone_polygons, "ba_climate_zone"),
        ):
            zone = county.get(field)
            if zone is not None:
                polygons_by_zone[zone].append(polygon)

    return (
        _merge_zones(iecc_climate_zone_polygons, "iecc_climate_zone"),
        _merge_zones(iecc_moisture_regime_polygons, "iecc_moisture_regime"),
        _merge_zones(ba_climate_zone_polygons, "ba_climate_zone"),
    )
def _compute_containment(
    point_metadata, point_id_field, polygon_metadata, polygon_metadata_field
):
    """Tag each point with the id of the polygon containing it (or None).

    Mutates *point_metadata* in place, setting
    ``point[polygon_metadata_field]`` on every point.
    NOTE(review): ``point_id_field`` is unused — presumably kept for call-site
    symmetry; confirm before removing. If polygons overlap, the last
    containing polygon in iteration order wins.
    """
    from shapely.vectorized import contains

    points, lats, lons = zip(
        *[
            (point, point["latitude"], point["longitude"])
            for point in point_metadata.values()
        ]
    )
    for i, polygon in enumerate(polygon_metadata.values()):
        # vectorized point-in-polygon test over all points at once
        containment = contains(polygon["polygon"], lons, lats)
        for point, c in zip(points, containment):
            if c:
                point[polygon_metadata_field] = polygon[polygon_metadata_field]

    # fill in with None
    for point in point_metadata.values():
        point[polygon_metadata_field] = point.get(polygon_metadata_field, None)
def _map_zcta_to_climate_zones(
    zcta_metadata,
    iecc_climate_zone_metadata,
    iecc_moisture_regime_metadata,
    ba_climate_zone_metadata,
    ca_climate_zone_metadata,
):
    """Tag each ZCTA with the climate zones whose polygons contain its centroid."""
    zone_fields = (
        (iecc_climate_zone_metadata, "iecc_climate_zone"),
        (iecc_moisture_regime_metadata, "iecc_moisture_regime"),
        (ba_climate_zone_metadata, "ba_climate_zone"),
        (ca_climate_zone_metadata, "ca_climate_zone"),
    )
    for polygon_metadata, field in zone_fields:
        _compute_containment(zcta_metadata, "zcta", polygon_metadata, field)
def _map_isd_station_to_climate_zones(
    isd_station_metadata,
    iecc_climate_zone_metadata,
    iecc_moisture_regime_metadata,
    ba_climate_zone_metadata,
    ca_climate_zone_metadata,
):
    """Tag each ISD station with the climate zones containing its location."""
    zone_fields = (
        (iecc_climate_zone_metadata, "iecc_climate_zone"),
        (iecc_moisture_regime_metadata, "iecc_moisture_regime"),
        (ba_climate_zone_metadata, "ba_climate_zone"),
        (ca_climate_zone_metadata, "ca_climate_zone"),
    )
    for polygon_metadata, field in zone_fields:
        _compute_containment(isd_station_metadata, "usaf_id", polygon_metadata, field)
def _find_zcta_closest_isd_stations(zcta_metadata, isd_station_metadata, limit=None):
    """Attach to each ZCTA its *limit* (default 10) nearest ISD stations.

    Mutates *zcta_metadata* in place, setting
    ``zcta["closest_isd_stations"]`` to a ranked list of station records
    with geodesic distance (WGS84) and per-climate-zone match flags.
    """
    if limit is None:
        limit = 10
    import pyproj

    geod = pyproj.Geod(ellps="WGS84")

    # unpack station ids and coordinates once, in parallel arrays
    isd_usaf_ids, isd_lats, isd_lngs = zip(
        *[
            (
                isd_station["usaf_id"],
                float(isd_station["latitude"]),
                float(isd_station["longitude"]),
            )
            for isd_station in isd_station_metadata.values()
        ]
    )
    isd_lats = np.array(isd_lats)
    isd_lngs = np.array(isd_lngs)

    for zcta in zcta_metadata.values():
        # vectorized geodesic distance from this ZCTA centroid to every
        # station; geod.inv returns (fwd az, back az, distance)
        zcta_lats = np.tile(zcta["latitude"], isd_lats.shape)
        zcta_lngs = np.tile(zcta["longitude"], isd_lngs.shape)
        dists = geod.inv(zcta_lngs, zcta_lats, isd_lngs, isd_lats)[2]
        # indices of the `limit` nearest stations, nearest first
        sorted_dists = np.argsort(dists)[:limit]
        closest_isd_stations = []
        for i, idx in enumerate(sorted_dists):
            usaf_id = isd_usaf_ids[idx]
            isd_station = isd_station_metadata[usaf_id]
            closest_isd_stations.append(
                {
                    "usaf_id": usaf_id,
                    "distance_meters": int(round(dists[idx])),
                    "rank": i + 1,  # 1 = nearest
                    "iecc_climate_zone_match": (
                        zcta.get("iecc_climate_zone")
                        == isd_station.get("iecc_climate_zone")
                    ),
                    "iecc_moisture_regime_match": (
                        zcta.get("iecc_moisture_regime")
                        == isd_station.get("iecc_moisture_regime")
                    ),
                    "ba_climate_zone_match": (
                        zcta.get("ba_climate_zone")
                        == isd_station.get("ba_climate_zone")
                    ),
                    "ca_climate_zone_match": (
                        zcta.get("ca_climate_zone")
                        == isd_station.get("ca_climate_zone")
                    ),
                }
            )
        zcta["closest_isd_stations"] = closest_isd_stations
def _create_table_structures(conn):
    """Create all (empty) metadata tables on *conn*.

    No indexes are created here; each ``_write_*`` helper adds its own
    indexes after bulk-inserting rows. The transaction is not committed.
    """
    table_ddl = (
        """
        create table isd_station_metadata (
          usaf_id text not null
          , wban_ids text not null
          , recent_wban_id text not null
          , name text not null
          , icao_code text
          , latitude text
          , longitude text
          , elevation text
          , state text
          , quality text default 'low'
          , iecc_climate_zone text
          , iecc_moisture_regime text
          , ba_climate_zone text
          , ca_climate_zone text
        )
        """,
        """
        create table isd_file_metadata (
          usaf_id text not null
          , year text not null
          , wban_id text not null
        )
        """,
        """
        create table zcta_metadata (
          zcta_id text not null
          , geometry text
          , latitude text not null
          , longitude text not null
          , state text
          , iecc_climate_zone text
          , iecc_moisture_regime text
          , ba_climate_zone text
          , ca_climate_zone text
        )
        """,
        """
        create table iecc_climate_zone_metadata (
          iecc_climate_zone text not null
          , geometry text
        )
        """,
        """
        create table iecc_moisture_regime_metadata (
          iecc_moisture_regime text not null
          , geometry text
        )
        """,
        """
        create table ba_climate_zone_metadata (
          ba_climate_zone text not null
          , geometry text
        )
        """,
        """
        create table ca_climate_zone_metadata (
          ca_climate_zone text not null
          , name text not null
          , geometry text
        )
        """,
        """
        create table tmy3_station_metadata (
          usaf_id text not null
          , name text not null
          , state text not null
          , class text not null
        )
        """,
        """
        create table cz2010_station_metadata (
          usaf_id text not null
        )
        """,
    )
    cur = conn.cursor()
    for ddl in table_ddl:
        cur.execute(ddl)
def _write_isd_station_metadata_table(conn, isd_station_metadata):
    """Bulk-insert ISD station rows and create lookup indexes; commits."""
    cur = conn.cursor()

    # one row per station, sorted by usaf_id for deterministic output;
    # wban_ids are flattened to comma-separated text
    rows = [
        (
            metadata["usaf_id"],
            ",".join(metadata["wban_ids"]),
            metadata["recent_wban_id"],
            metadata["name"],
            metadata["icao_code"],
            metadata["latitude"],
            metadata["longitude"],
            metadata["elevation"],
            metadata["state"],
            metadata["quality"],
            metadata["iecc_climate_zone"],
            metadata["iecc_moisture_regime"],
            metadata["ba_climate_zone"],
            metadata["ca_climate_zone"],
        )
        for station, metadata in sorted(isd_station_metadata.items())
    ]

    cur.executemany(
        """
      insert into isd_station_metadata(
        usaf_id
        , wban_ids
        , recent_wban_id
        , name
        , icao_code
        , latitude
        , longitude
        , elevation
        , state
        , quality
        , iecc_climate_zone
        , iecc_moisture_regime
        , ba_climate_zone
        , ca_climate_zone
      ) values (?,?,?,?,?,?,?,?,?,?,?,?,?,?)
    """,
        rows,
    )

    # indexes for the common lookup/filter columns
    cur.execute(
        """
      create index isd_station_metadata_usaf_id on isd_station_metadata(usaf_id)
    """
    )
    cur.execute(
        """
      create index isd_station_metadata_state on isd_station_metadata(state)
    """
    )
    cur.execute(
        """
      create index isd_station_metadata_iecc_climate_zone on
        isd_station_metadata(iecc_climate_zone)
    """
    )
    cur.execute(
        """
      create index isd_station_metadata_iecc_moisture_regime on
        isd_station_metadata(iecc_moisture_regime)
    """
    )
    cur.execute(
        """
      create index isd_station_metadata_ba_climate_zone on
        isd_station_metadata(ba_climate_zone)
    """
    )
    cur.execute(
        """
      create index isd_station_metadata_ca_climate_zone on
        isd_station_metadata(ca_climate_zone)
    """
    )
    cur.close()
    conn.commit()
def _write_isd_file_metadata_table(conn, isd_file_metadata):
cur = conn.cursor()
rows = [
(metadata["usaf_id"], year, station_data["wban_id"])
for isd_station, metadata in sorted(isd_file_metadata.items())
for year, year_data in sorted(metadata["years"].items())
for station_data in year_data
]
cur.executemany(
"""
insert into isd_file_metadata(
usaf_id
, year
, wban_id
) values (?,?,?)
""",
rows,
)
cur.execute(
"""
create index isd_file_metadata_usaf_id on
isd_file_metadata(usaf_id)
"""
)
cur.execute(
"""
create index isd_file_metadata_year on
isd_file_metadata(year)
"""
)
cur.execute(
"""
create index isd_file_metadata_usaf_id_year on
isd_file_metadata(usaf_id, year)
"""
)
cur.execute(
"""
create index isd_file_metadata_wban_id on
isd_file_metadata(wban_id)
"""
)
cur.close()
conn.commit()
def _write_zcta_metadata_table(conn, zcta_metadata, geometry=False):
cur = conn.cursor()
rows = [
(
metadata["zcta"],
metadata["geometry"] if geometry else None,
metadata["latitude"],
metadata["longitude"],
metadata["state"],
metadata["iecc_climate_zone"],
metadata["iecc_moisture_regime"],
metadata["ba_climate_zone"],
metadata["ca_climate_zone"],
)
for zcta, metadata in sorted(zcta_metadata.items())
]
cur.executemany(
"""
insert into zcta_metadata(
zcta_id
, geometry
, latitude
, longitude
, state
, iecc_climate_zone
, iecc_moisture_regime
, ba_climate_zone
, ca_climate_zone
) values (?,?,?,?,?,?,?,?,?)
""",
rows,
)
cur.execute(
"""
create index zcta_metadata_zcta_id on zcta_metadata(zcta_id)
"""
)
cur.execute(
"""
create index zcta_metadata_state on zcta_metadata(state)
"""
)
cur.execute(
"""
create index zcta_metadata_iecc_climate_zone on zcta_metadata(iecc_climate_zone)
"""
)
cur.execute(
"""
create index zcta_metadata_iecc_moisture_regime on zcta_metadata(iecc_moisture_regime)
"""
)
cur.execute(
"""
create index zcta_metadata_ba_climate_zone on zcta_metadata(ba_climate_zone)
"""
)
cur.execute(
"""
create index zcta_metadata_ca_climate_zone on zcta_metadata(ca_climate_zone)
"""
)
cur.close()
conn.commit()
def _write_iecc_climate_zone_metadata_table(
conn, iecc_climate_zone_metadata, geometry=True
):
cur = conn.cursor()
rows = [
(metadata["iecc_climate_zone"], metadata["geometry"] if geometry else None)
for iecc_climate_zone, metadata in sorted(iecc_climate_zone_metadata.items())
]
cur.executemany(
"""
insert into iecc_climate_zone_metadata(
iecc_climate_zone
, geometry
) values (?,?)
""",
rows,
)
cur.execute(
"""
create index iecc_climate_zone_metadata_iecc_climate_zone on
iecc_climate_zone_metadata(iecc_climate_zone)
"""
)
cur.close()
conn.commit()
def _write_iecc_moisture_regime_metadata_table(
conn, iecc_moisture_regime_metadata, geometry=True
):
cur = conn.cursor()
rows = [
(metadata["iecc_moisture_regime"], metadata["geometry"] if geometry else None)
for iecc_moisture_regime, metadata in sorted(
iecc_moisture_regime_metadata.items()
)
]
cur.executemany(
"""
insert into iecc_moisture_regime_metadata(
iecc_moisture_regime
, geometry
) values (?,?)
""",
rows,
)
cur.execute(
"""
create index iecc_moisture_regime_metadata_iecc_moisture_regime on
iecc_moisture_regime_metadata(iecc_moisture_regime)
"""
)
cur.close()
conn.commit()
def _write_ba_climate_zone_metadata_table(
conn, ba_climate_zone_metadata, geometry=True
):
cur = conn.cursor()
rows = [
(metadata["ba_climate_zone"], metadata["geometry"] if geometry else None)
for ba_climate_zone, metadata in sorted(ba_climate_zone_metadata.items())
]
cur.executemany(
"""
insert into ba_climate_zone_metadata(
ba_climate_zone
, geometry
) values (?,?)
""",
rows,
)
cur.execute(
"""
create index ba_climate_zone_metadata_ba_climate_zone on
ba_climate_zone_metadata(ba_climate_zone)
"""
)
cur.close()
conn.commit()
def _write_ca_climate_zone_metadata_table(
conn, ca_climate_zone_metadata, geometry=True
):
cur = conn.cursor()
rows = [
(
metadata["ca_climate_zone"],
metadata["name"],
metadata["geometry"] if geometry else None,
)
for ca_climate_zone, metadata in sorted(ca_climate_zone_metadata.items())
]
cur.executemany(
"""
insert into ca_climate_zone_metadata(
ca_climate_zone
, name
, geometry
) values (?,?,?)
""",
rows,
)
cur.execute(
"""
create index ca_climate_zone_metadata_ca_climate_zone on
ca_climate_zone_metadata(ca_climate_zone)
"""
)
cur.close()
conn.commit()
def _write_tmy3_station_metadata_table(conn, tmy3_station_metadata):
cur = conn.cursor()
rows = [
(metadata["usaf_id"], metadata["name"], metadata["state"], metadata["class"])
for tmy3_station, metadata in sorted(tmy3_station_metadata.items())
]
cur.executemany(
"""
insert into tmy3_station_metadata(
usaf_id
, name
, state
, class
) values (?,?,?,?)
""",
rows,
)
cur.execute(
"""
create index tmy3_station_metadata_usaf_id on
tmy3_station_metadata(usaf_id)
"""
)
cur.close()
conn.commit()
def _write_cz2010_station_metadata_table(conn, cz2010_station_metadata):
cur = conn.cursor()
rows = [
(metadata["usaf_id"],)
for cz2010_station, metadata in sorted(cz2010_station_metadata.items())
]
cur.executemany(
"""
insert into cz2010_station_metadata(
usaf_id
) values (?)
""",
rows,
)
cur.execute(
"""
create index cz2010_station_metadata_usaf_id on
cz2010_station_metadata(usaf_id)
"""
)
cur.close()
conn.commit()
def build_metadata_db(
    zcta_geometry=False,
    iecc_climate_zone_geometry=True,
    iecc_moisture_regime_geometry=True,
    ba_climate_zone_geometry=True,
    ca_climate_zone_geometry=True,
):
    """ Build database of metadata from primary sources.

    Downloads primary sources, clears existing DB, and rebuilds from
    scratch: loads all primary-source data into memory, augments it
    (station quality, climate zone containment), then writes every table.
    The temporary download directory is removed at the end.

    Parameters
    ----------
    zcta_geometry : bool, optional
        Whether or not to include ZCTA geometry in database.
    iecc_climate_zone_geometry : bool, optional
        Whether or not to include IECC Climate Zone geometry in database.
    iecc_moisture_regime_geometry : bool, optional
        Whether or not to include IECC Moisture Regime geometry in database.
    ba_climate_zone_geometry : bool, optional
        Whether or not to include Building America Climate Zone geometry in database.
    ca_climate_zone_geometry : bool, optional
        Whether or not to include California Building Climate Zone Area geometry in database.

    Raises
    ------
    ImportError
        If any optional dependency (shapely, beautifulsoup4, pyproj,
        simplejson) is missing.
    """
    # Fail fast with a clear message if an optional dependency used by the
    # loaders below is not installed; the imported names themselves are not
    # used here.
    try:
        import shapely
    except ImportError:
        raise ImportError("Loading polygons requires shapely.")
    try:
        from bs4 import BeautifulSoup
    except ImportError:
        raise ImportError("Scraping TMY3 station data requires beautifulsoup4.")
    try:
        import pyproj
    except ImportError:
        raise ImportError("Computing distances requires pyproj.")
    try:
        import simplejson
    except ImportError:
        raise ImportError("Writing geojson requires simplejson.")
    download_path = _download_primary_sources()
    # reset_database clears any previously-built metadata DB.
    conn = metadata_db_connection_proxy.reset_database()
    # Load data into memory
    print("Loading ZCTAs")
    zcta_metadata = _load_zcta_metadata(download_path)
    print("Loading counties")
    county_metadata = _load_county_metadata(download_path)
    print("Merging county climate zones")
    (
        iecc_climate_zone_metadata,
        iecc_moisture_regime_metadata,
        ba_climate_zone_metadata,
    ) = _create_merged_climate_zones_metadata(county_metadata)
    print("Loading CA climate zones")
    ca_climate_zone_metadata = _load_CA_climate_zone_metadata(download_path)
    print("Loading ISD station metadata")
    isd_station_metadata = _load_isd_station_metadata(download_path)
    print("Loading ISD station file metadata")
    isd_file_metadata = _load_isd_file_metadata(download_path, isd_station_metadata)
    print("Loading TMY3 station metadata")
    tmy3_station_metadata = _load_tmy3_station_metadata(download_path)
    print("Loading CZ2010 station metadata")
    cz2010_station_metadata = _load_cz2010_station_metadata()
    # Augment data in memory
    print("Computing ISD station quality")
    # add rough station quality to station metadata
    # (all months in last 5 years have at least 600 points)
    _compute_isd_station_quality(isd_station_metadata, isd_file_metadata)
    print("Mapping ZCTAs to climate zones")
    # add county and ca climate zone mappings
    _map_zcta_to_climate_zones(
        zcta_metadata,
        iecc_climate_zone_metadata,
        iecc_moisture_regime_metadata,
        ba_climate_zone_metadata,
        ca_climate_zone_metadata,
    )
    print("Mapping ISD stations to climate zones")
    # add county and ca climate zone mappings
    _map_isd_station_to_climate_zones(
        isd_station_metadata,
        iecc_climate_zone_metadata,
        iecc_moisture_regime_metadata,
        ba_climate_zone_metadata,
        ca_climate_zone_metadata,
    )
    # Write tables
    print("Creating table structures")
    _create_table_structures(conn)
    print("Writing ZCTA data")
    _write_zcta_metadata_table(conn, zcta_metadata, geometry=zcta_geometry)
    print("Writing IECC climate zone data")
    _write_iecc_climate_zone_metadata_table(
        conn, iecc_climate_zone_metadata, geometry=iecc_climate_zone_geometry
    )
    print("Writing IECC moisture regime data")
    _write_iecc_moisture_regime_metadata_table(
        conn, iecc_moisture_regime_metadata, geometry=iecc_moisture_regime_geometry
    )
    print("Writing BA climate zone data")
    _write_ba_climate_zone_metadata_table(
        conn, ba_climate_zone_metadata, geometry=ba_climate_zone_geometry
    )
    print("Writing CA climate zone data")
    _write_ca_climate_zone_metadata_table(
        conn, ca_climate_zone_metadata, geometry=ca_climate_zone_geometry
    )
    print("Writing ISD station metadata")
    _write_isd_station_metadata_table(conn, isd_station_metadata)
    print("Writing ISD file metadata")
    _write_isd_file_metadata_table(conn, isd_file_metadata)
    print("Writing TMY3 station metadata")
    _write_tmy3_station_metadata_table(conn, tmy3_station_metadata)
    print("Writing CZ2010 station metadata")
    _write_cz2010_station_metadata_table(conn, cz2010_station_metadata)
    print("Cleaning up...")
    # Remove the temporary directory created by _download_primary_sources.
    shutil.rmtree(download_path)
    print("\u2728 Completed! \u2728")
def inspect_metadata_db():
    """Open an interactive ``sqlite3`` shell on the metadata database.

    Requires the ``sqlite3`` command-line binary on the PATH; blocks until
    the shell exits.
    """
    subprocess.call(["sqlite3", metadata_db_connection_proxy.db_path])
|
openeemeter/eeweather | eeweather/database.py | _load_isd_file_metadata | python | def _load_isd_file_metadata(download_path, isd_station_metadata):
isd_inventory = pd.read_csv(
os.path.join(download_path, "isd-inventory.csv"), dtype=str
)
# filter to stations with metadata
station_keep = [usaf in isd_station_metadata for usaf in isd_inventory.USAF]
isd_inventory = isd_inventory[station_keep]
# filter by year
year_keep = isd_inventory.YEAR > "2005"
isd_inventory = isd_inventory[year_keep]
metadata = {}
for (usaf_station, year), group in isd_inventory.groupby(["USAF", "YEAR"]):
if usaf_station not in metadata:
metadata[usaf_station] = {"usaf_id": usaf_station, "years": {}}
metadata[usaf_station]["years"][year] = [
{
"wban_id": row.WBAN,
"counts": [
row.JAN,
row.FEB,
row.MAR,
row.APR,
row.MAY,
row.JUN,
row.JUL,
row.AUG,
row.SEP,
row.OCT,
row.NOV,
row.DEC,
],
}
for i, row in group.iterrows()
]
return metadata | Collect data counts for isd files. | train | https://github.com/openeemeter/eeweather/blob/d32b7369b26edfa3ee431c60457afeb0593123a7/eeweather/database.py#L205-L244 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2018 Open Energy Efficiency, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import defaultdict
from datetime import datetime, timedelta
import json
import logging
import os
import shutil
import subprocess
import tempfile
import pandas as pd
import numpy as np
from .connections import noaa_ftp_connection_proxy, metadata_db_connection_proxy
logger = logging.getLogger(__name__)
__all__ = ("build_metadata_db", "inspect_metadata_db")
# USAF ids of the ISD stations that have published CZ2010 (California
# climate zone 2010) weather files; consumed by _load_cz2010_station_metadata.
CZ2010_LIST = [
    "725958",
    "725945",
    "723840",
    "724837",
    "724800",
    "725845",
    "747188",
    "722880",
    "723926",
    "722926",
    "722927",
    "746120",
    "722899",
    "724936",
    "725946",
    "723815",
    "723810",
    "722810",
    "725940",
    "723890",
    "722976",
    "724935",
    "747185",
    "722909",
    "723826",
    "722956",
    "725847",
    "723816",
    "747020",
    "724927",
    "722895",
    "722970",
    "722975",
    "722874",
    "722950",
    "724815",
    "724926",
    "722953",
    "725955",
    "724915",
    "725957",
    "724955",
    "723805",
    "724930",
    "723927",
    "722868",
    "747187",
    "723820",
    "724937",
    "723965",
    "723910",
    "723895",
    "725910",
    "725920",
    "722860",
    "722869",
    "724830",
    "724839",
    "724917",
    "724938",
    "722925",
    "722907",
    "722900",
    "722903",
    "722906",
    "724940",
    "724945",
    "724946",
    "722897",
    "722910",
    "723830",
    "722977",
    "723925",
    "723940",
    "722885",
    "724957",
    "724920",
    "722955",
    "745160",
    "725846",
    "690150",
    "725905",
    "722886",
    "723930",
    "723896",
    "724838",
]
class PrettyFloat(float):
    """Float whose repr is limited to 7 significant digits."""

    def __repr__(self):
        return f"{float(self):.7g}"
def pretty_floats(obj):
    """Recursively round floats to 4 decimals, wrapping them in PrettyFloat.

    Dicts and lists/tuples are rebuilt (tuples become lists); any other
    value is returned unchanged.
    """
    if isinstance(obj, float):
        return PrettyFloat(round(obj, 4))
    if isinstance(obj, dict):
        return {key: pretty_floats(value) for key, value in obj.items()}
    if isinstance(obj, (list, tuple)):
        return [pretty_floats(item) for item in obj]
    return obj
def to_geojson(polygon):
    """Serialize a shapely geometry to a compact geojson string.

    Coordinates are rounded to 4 decimal places via ``pretty_floats`` to
    keep the stored geometry small; ``separators`` removes whitespace.
    """
    import simplejson
    from shapely.geometry import mapping
    return simplejson.dumps(pretty_floats(mapping(polygon)), separators=(",", ":"))
def _download_primary_sources():
    """Run the ``download_primary_sources.sh`` helper script.

    Downloads all primary sources into a fresh temporary directory and
    returns that directory's path (caller is responsible for cleanup).
    """
    package_dir = os.path.dirname(os.path.abspath(__file__))
    script = os.path.join(
        os.path.dirname(package_dir), "scripts", "download_primary_sources.sh"
    )
    target_dir = tempfile.mkdtemp()
    subprocess.call([script, target_dir])
    return target_dir
def _load_isd_station_metadata(download_path):
    """ Collect metadata for US isd stations.

    Reads ``isd-history.csv`` from *download_path* and returns a dict keyed
    by USAF id, keeping stations that have usable coordinates, are in the
    US or a US territory, and have a real (non-999999) USAF id. Values are
    taken from the station's most recent record.
    """
    from shapely.geometry import Point
    # load ISD history which contains metadata
    # dtype=str preserves leading zeros in ids; BEGIN/END become datetimes
    isd_history = pd.read_csv(
        os.path.join(download_path, "isd-history.csv"),
        dtype=str,
        parse_dates=["BEGIN", "END"],
    )
    # usable coordinates: LAT/LON present and LAT not exactly 0
    hasGEO = (
        isd_history.LAT.notnull() & isd_history.LON.notnull() & (isd_history.LAT != 0)
    )
    isUS = (
        ((isd_history.CTRY == "US") & (isd_history.STATE.notnull()))
        # AQ = American Samoa, GQ = Guam, RQ = Peurto Rico, VQ = Virgin Islands
        | (isd_history.CTRY.str[1] == "Q")
    )
    # "999999" marks records without a real USAF id
    hasUSAF = isd_history.USAF != "999999"
    metadata = {}
    for usaf_station, group in isd_history[hasGEO & isUS & hasUSAF].groupby("USAF"):
        # find most recent
        recent = group.loc[group.END.idxmax()]
        wban_stations = list(group.WBAN)
        # "+00.000"/"+000.000" and ELEV(M) values starting "-999" are
        # treated as missing-data sentinels and stored as None.
        metadata[usaf_station] = {
            "usaf_id": usaf_station,
            "wban_ids": wban_stations,
            "recent_wban_id": recent.WBAN,
            "name": recent["STATION NAME"],
            "icao_code": recent.ICAO,
            "latitude": recent.LAT if recent.LAT not in ("+00.000",) else None,
            "longitude": recent.LON if recent.LON not in ("+000.000",) else None,
            "point": Point(float(recent.LON), float(recent.LAT)),
            "elevation": recent["ELEV(M)"]
            if not str(float(recent["ELEV(M)"])).startswith("-999")
            else None,
            "state": recent.STATE,
        }
    return metadata
def _compute_isd_station_quality(
isd_station_metadata,
isd_file_metadata,
end_year=None,
years_back=None,
quality_func=None,
):
if end_year is None:
end_year = datetime.now().year - 1 # last full year
if years_back is None:
years_back = 5
if quality_func is None:
def quality_func(values):
minimum = values.min()
if minimum > 24 * 25:
return "high"
elif minimum > 24 * 15:
return "medium"
else:
return "low"
# e.g., if end_year == 2017, year_range = ["2013", "2014", ..., "2017"]
year_range = set([str(y) for y in range(end_year - (years_back - 1), end_year + 1)])
def _compute_station_quality(usaf_id):
years_data = isd_file_metadata.get(usaf_id, {}).get("years", {})
if not all([year in years_data for year in year_range]):
return quality_func(np.repeat(0, 60))
counts = defaultdict(lambda: 0)
for y, year in enumerate(year_range):
for station in years_data[year]:
for m, month_counts in enumerate(station["counts"]):
counts[y * 12 + m] += int(month_counts)
return quality_func(np.array(list(counts.values())))
# figure out counts for years of interest
for usaf_id, metadata in isd_station_metadata.items():
metadata["quality"] = _compute_station_quality(usaf_id)
def _load_zcta_metadata(download_path):
    """Load ZCTA (ZIP Code Tabulation Area) metadata from downloaded files.

    Combines the Census ZCTA geojson with a state lookup derived from
    3-digit ZIP prefixes. Returns a dict keyed by ZCTA id with the shapely
    polygon, its compact geojson, centroid, lat/lon, and state (None when
    the prefix is not listed).
    """
    from shapely.geometry import shape
    # load zcta geojson
    geojson_path = os.path.join(download_path, "cb_2016_us_zcta510_500k.json")
    with open(geojson_path, "r") as f:
        geojson = json.load(f)
    # load ZIP code prefixes by state
    zipcode_prefixes_path = os.path.join(download_path, "zipcode_prefixes.json")
    with open(zipcode_prefixes_path, "r") as f:
        zipcode_prefixes = json.load(f)
    # invert {state: [prefix, ...]} into {prefix: state}
    prefix_to_zipcode = {
        zipcode_prefix: state
        for state, zipcode_prefix_list in zipcode_prefixes.items()
        for zipcode_prefix in zipcode_prefix_list
    }
    def _get_state(zcta):
        prefix = zcta[:3]
        return prefix_to_zipcode.get(prefix)
    metadata = {}
    for feature in geojson["features"]:
        zcta = feature["properties"]["GEOID10"]
        geometry = feature["geometry"]
        polygon = shape(geometry)
        centroid = polygon.centroid
        state = _get_state(zcta)
        # centroid.coords[0] is a (lon, lat) pair
        metadata[zcta] = {
            "zcta": zcta,
            "polygon": polygon,
            "geometry": to_geojson(polygon),
            "centroid": centroid,
            "latitude": centroid.coords[0][1],
            "longitude": centroid.coords[0][0],
            "state": state,
        }
    return metadata
def _load_county_metadata(download_path):
    """Load US county metadata (geometry plus climate zone assignments).

    Combines the Census county geojson with the climate zone CSV. Returns a
    dict keyed by 5-digit county FIPS with the shapely polygon, compact
    geojson, centroid lat/lon, and — for counties listed in the CSV — name,
    IECC climate zone, IECC moisture regime, and BA climate zone.
    """
    from shapely.geometry import shape

    # load county geojson
    geojson_path = os.path.join(download_path, "cb_2016_us_county_500k.json")
    with open(geojson_path, "r") as f:
        geojson = json.load(f)
    metadata = {}
    for feature in geojson["features"]:
        county = feature["properties"]["GEOID"]
        geometry = feature["geometry"]
        polygon = shape(geometry)
        centroid = polygon.centroid
        # centroid.coords[0] is a (lon, lat) pair
        metadata[county] = {
            "county": county,
            "polygon": polygon,
            "geometry": to_geojson(polygon),
            "centroid": centroid,
            "latitude": centroid.coords[0][1],
            "longitude": centroid.coords[0][0],
        }
    # load county climate zones
    county_climate_zones = pd.read_csv(
        os.path.join(download_path, "climate_zones.csv"),
        dtype=str,
        usecols=[
            "State FIPS",
            "County FIPS",
            "IECC Climate Zone",
            "IECC Moisture Regime",
            "BA Climate Zone",
            "County Name",
        ],
    )
    for _, row in county_climate_zones.iterrows():
        county = row["State FIPS"] + row["County FIPS"]
        if county not in metadata:
            # Logger.warn is deprecated; warning is the supported spelling.
            logger.warning(
                "Could not find geometry for county {}, skipping.".format(county)
            )
            continue
        metadata[county].update(
            {
                "name": row["County Name"],
                "iecc_climate_zone": row["IECC Climate Zone"],
                "iecc_moisture_regime": (
                    row["IECC Moisture Regime"]
                    if not pd.isnull(row["IECC Moisture Regime"])
                    else None
                ),
                "ba_climate_zone": row["BA Climate Zone"],
            }
        )
    return metadata
def _load_CA_climate_zone_metadata(download_path):
    """Load California Building Climate Zone metadata.

    Returns a dict keyed by two-digit zone number ('01'..'16') with the
    zone id (prefixed ``CA_``), a representative city name, the shapely
    polygon, and its compact geojson.
    """
    # `mapping` was previously imported here but never used.
    from shapely.geometry import shape

    ca_climate_zone_names = {
        "01": "Arcata",
        "02": "Santa Rosa",
        "03": "Oakland",
        "04": "San Jose-Reid",
        "05": "Santa Maria",
        "06": "Torrance",
        "07": "San Diego-Lindbergh",
        "08": "Fullerton",
        "09": "Burbank-Glendale",
        "10": "Riverside",
        "11": "Red Bluff",
        "12": "Sacramento",
        "13": "Fresno",
        "14": "Palmdale",
        "15": "Palm Spring-Intl",
        "16": "Blue Canyon",
    }
    geojson_path = os.path.join(
        download_path, "CA_Building_Standards_Climate_Zones.json"
    )
    with open(geojson_path, "r") as f:
        geojson = json.load(f)
    metadata = {}
    for feature in geojson["features"]:
        # normalize zone numbers to two digits, e.g. 1 -> "01"
        zone = "{:02d}".format(int(feature["properties"]["Zone"]))
        geometry = feature["geometry"]
        polygon = shape(geometry)
        metadata[zone] = {
            "ca_climate_zone": "CA_{}".format(zone),
            "name": ca_climate_zone_names[zone],
            "polygon": polygon,
            "geometry": to_geojson(polygon),
        }
    return metadata
def _load_tmy3_station_metadata(download_path):
    """Scrape TMY3 station metadata out of the saved NREL HTML page.

    Returns a dict keyed by USAF id with name, state, and TMY3 class.
    NOTE(review): relies on the page's exact markup — ``td .hide`` cells
    followed by name and class cells — verify if the source page changes.
    """
    from bs4 import BeautifulSoup
    path = os.path.join(download_path, "tmy3-stations.html")
    with open(path, "r") as f:
        soup = BeautifulSoup(f.read(), "html.parser")
    tmy3_station_elements = soup.select("td .hide")
    metadata = {}
    for station_el in tmy3_station_elements:
        # sibling cells: <usaf id> | <"Name, ST"> | <"Class X">
        station_name_el = station_el.findNext("td").findNext("td")
        station_class_el = station_name_el.findNext("td")
        usaf_id = station_el.text.strip()
        # name cell is "Name, ST"; strip the trailing state and whitespace
        name = (
            "".join(station_name_el.text.split(",")[:-1])
            .replace("\n", "")
            .replace("\t", "")
            .strip()
        )
        metadata[usaf_id] = {
            "usaf_id": usaf_id,
            "name": name,
            "state": station_name_el.text.split(",")[-1].strip(),
            "class": station_class_el.text.split()[1].strip(),
        }
    return metadata
def _load_cz2010_station_metadata():
    """Return minimal metadata ({usaf_id: {"usaf_id": ...}}) for CZ2010 stations."""
    metadata = {}
    for usaf_id in CZ2010_LIST:
        metadata[usaf_id] = {"usaf_id": usaf_id}
    return metadata
def _create_merged_climate_zones_metadata(county_metadata):
    """Merge county polygons into one polygon per climate zone.

    Counties are grouped by IECC climate zone, IECC moisture regime, and BA
    climate zone (counties without a value for a grouping are skipped for
    that grouping); each group's polygons are unioned and simplified with a
    0.01 tolerance.

    Returns
    -------
    tuple of dict
        ``(iecc_climate_zone_metadata, iecc_moisture_regime_metadata,
        ba_climate_zone_metadata)`` — each keyed by zone value, with
        ``{<zone key>, 'polygon', 'geometry'}`` entries.
    """
    from shapely.ops import cascaded_union

    # Group each county's polygon under every zone value it carries.
    iecc_climate_zone_polygons = defaultdict(list)
    iecc_moisture_regime_polygons = defaultdict(list)
    ba_climate_zone_polygons = defaultdict(list)
    for county in county_metadata.values():
        polygon = county["polygon"]
        for key, polygons_by_zone in (
            ("iecc_climate_zone", iecc_climate_zone_polygons),
            ("iecc_moisture_regime", iecc_moisture_regime_polygons),
            ("ba_climate_zone", ba_climate_zone_polygons),
        ):
            zone_value = county.get(key)
            if zone_value is not None:
                polygons_by_zone[zone_value].append(polygon)

    def _merge(polygons_by_zone, key):
        # Union each zone's county polygons and simplify for compact storage.
        merged = {}
        for zone, polygons in polygons_by_zone.items():
            polygon = cascaded_union(polygons).simplify(0.01)
            merged[zone] = {
                key: zone,
                "polygon": polygon,
                "geometry": to_geojson(polygon),
            }
        return merged

    return (
        _merge(iecc_climate_zone_polygons, "iecc_climate_zone"),
        _merge(iecc_moisture_regime_polygons, "iecc_moisture_regime"),
        _merge(ba_climate_zone_polygons, "ba_climate_zone"),
    )
def _compute_containment(
    point_metadata, point_id_field, polygon_metadata, polygon_metadata_field
):
    """Assign to each point the metadata field of the polygon containing it.

    Mutates *point_metadata* in place: every point gets
    ``point[polygon_metadata_field]`` set from a containing polygon's
    metadata, or ``None`` when no polygon contains it. If polygons overlap,
    the last containing polygon iterated wins. ``point_id_field`` is
    currently unused (kept for call-site symmetry).
    """
    from shapely.vectorized import contains

    # Vectorized containment: one pass per polygon over all points at once.
    points, lats, lons = zip(
        *[
            (point, point["latitude"], point["longitude"])
            for point in point_metadata.values()
        ]
    )
    for i, polygon in enumerate(polygon_metadata.values()):
        containment = contains(polygon["polygon"], lons, lats)
        for point, c in zip(points, containment):
            if c:
                point[polygon_metadata_field] = polygon[polygon_metadata_field]
    # fill in with None
    for point in point_metadata.values():
        point[polygon_metadata_field] = point.get(polygon_metadata_field, None)
def _map_zcta_to_climate_zones(
    zcta_metadata,
    iecc_climate_zone_metadata,
    iecc_moisture_regime_metadata,
    ba_climate_zone_metadata,
    ca_climate_zone_metadata,
):
    """Annotate each ZCTA with the climate zones whose polygon contains it."""
    zone_tables = (
        (iecc_climate_zone_metadata, "iecc_climate_zone"),
        (iecc_moisture_regime_metadata, "iecc_moisture_regime"),
        (ba_climate_zone_metadata, "ba_climate_zone"),
        (ca_climate_zone_metadata, "ca_climate_zone"),
    )
    for polygon_metadata, field in zone_tables:
        _compute_containment(zcta_metadata, "zcta", polygon_metadata, field)
def _map_isd_station_to_climate_zones(
    isd_station_metadata,
    iecc_climate_zone_metadata,
    iecc_moisture_regime_metadata,
    ba_climate_zone_metadata,
    ca_climate_zone_metadata,
):
    """Annotate each ISD station with the climate zones containing it."""
    zone_tables = (
        (iecc_climate_zone_metadata, "iecc_climate_zone"),
        (iecc_moisture_regime_metadata, "iecc_moisture_regime"),
        (ba_climate_zone_metadata, "ba_climate_zone"),
        (ca_climate_zone_metadata, "ca_climate_zone"),
    )
    for polygon_metadata, field in zone_tables:
        _compute_containment(isd_station_metadata, "usaf_id", polygon_metadata, field)
def _find_zcta_closest_isd_stations(zcta_metadata, isd_station_metadata, limit=None):
    """Annotate each ZCTA with its *limit* (default 10) closest ISD stations.

    Distances are geodesic (WGS84 ellipsoid via pyproj) from the ZCTA
    centroid to each station. Mutates *zcta_metadata* in place, adding a
    ``closest_isd_stations`` list sorted by distance; each entry carries
    the station id, distance in meters, 1-based rank, and whether each
    climate zone designation matches the ZCTA's.
    """
    if limit is None:
        limit = 10
    import pyproj
    geod = pyproj.Geod(ellps="WGS84")
    isd_usaf_ids, isd_lats, isd_lngs = zip(
        *[
            (
                isd_station["usaf_id"],
                float(isd_station["latitude"]),
                float(isd_station["longitude"]),
            )
            for isd_station in isd_station_metadata.values()
        ]
    )
    isd_lats = np.array(isd_lats)
    isd_lngs = np.array(isd_lngs)
    for zcta in zcta_metadata.values():
        # broadcast this ZCTA's centroid against every station at once
        zcta_lats = np.tile(zcta["latitude"], isd_lats.shape)
        zcta_lngs = np.tile(zcta["longitude"], isd_lngs.shape)
        # Geod.inv returns (fwd azimuth, back azimuth, distance); keep distance
        dists = geod.inv(zcta_lngs, zcta_lats, isd_lngs, isd_lats)[2]
        sorted_dists = np.argsort(dists)[:limit]
        closest_isd_stations = []
        for i, idx in enumerate(sorted_dists):
            usaf_id = isd_usaf_ids[idx]
            isd_station = isd_station_metadata[usaf_id]
            closest_isd_stations.append(
                {
                    "usaf_id": usaf_id,
                    "distance_meters": int(round(dists[idx])),
                    "rank": i + 1,
                    "iecc_climate_zone_match": (
                        zcta.get("iecc_climate_zone")
                        == isd_station.get("iecc_climate_zone")
                    ),
                    "iecc_moisture_regime_match": (
                        zcta.get("iecc_moisture_regime")
                        == isd_station.get("iecc_moisture_regime")
                    ),
                    "ba_climate_zone_match": (
                        zcta.get("ba_climate_zone")
                        == isd_station.get("ba_climate_zone")
                    ),
                    "ca_climate_zone_match": (
                        zcta.get("ca_climate_zone")
                        == isd_station.get("ca_climate_zone")
                    ),
                }
            )
        zcta["closest_isd_stations"] = closest_isd_stations
def _create_table_structures(conn):
cur = conn.cursor()
cur.execute(
"""
create table isd_station_metadata (
usaf_id text not null
, wban_ids text not null
, recent_wban_id text not null
, name text not null
, icao_code text
, latitude text
, longitude text
, elevation text
, state text
, quality text default 'low'
, iecc_climate_zone text
, iecc_moisture_regime text
, ba_climate_zone text
, ca_climate_zone text
)
"""
)
cur.execute(
"""
create table isd_file_metadata (
usaf_id text not null
, year text not null
, wban_id text not null
)
"""
)
cur.execute(
"""
create table zcta_metadata (
zcta_id text not null
, geometry text
, latitude text not null
, longitude text not null
, state text
, iecc_climate_zone text
, iecc_moisture_regime text
, ba_climate_zone text
, ca_climate_zone text
)
"""
)
cur.execute(
"""
create table iecc_climate_zone_metadata (
iecc_climate_zone text not null
, geometry text
)
"""
)
cur.execute(
"""
create table iecc_moisture_regime_metadata (
iecc_moisture_regime text not null
, geometry text
)
"""
)
cur.execute(
"""
create table ba_climate_zone_metadata (
ba_climate_zone text not null
, geometry text
)
"""
)
cur.execute(
"""
create table ca_climate_zone_metadata (
ca_climate_zone text not null
, name text not null
, geometry text
)
"""
)
cur.execute(
"""
create table tmy3_station_metadata (
usaf_id text not null
, name text not null
, state text not null
, class text not null
)
"""
)
cur.execute(
"""
create table cz2010_station_metadata (
usaf_id text not null
)
"""
)
def _write_isd_station_metadata_table(conn, isd_station_metadata):
cur = conn.cursor()
rows = [
(
metadata["usaf_id"],
",".join(metadata["wban_ids"]),
metadata["recent_wban_id"],
metadata["name"],
metadata["icao_code"],
metadata["latitude"],
metadata["longitude"],
metadata["elevation"],
metadata["state"],
metadata["quality"],
metadata["iecc_climate_zone"],
metadata["iecc_moisture_regime"],
metadata["ba_climate_zone"],
metadata["ca_climate_zone"],
)
for station, metadata in sorted(isd_station_metadata.items())
]
cur.executemany(
"""
insert into isd_station_metadata(
usaf_id
, wban_ids
, recent_wban_id
, name
, icao_code
, latitude
, longitude
, elevation
, state
, quality
, iecc_climate_zone
, iecc_moisture_regime
, ba_climate_zone
, ca_climate_zone
) values (?,?,?,?,?,?,?,?,?,?,?,?,?,?)
""",
rows,
)
cur.execute(
"""
create index isd_station_metadata_usaf_id on isd_station_metadata(usaf_id)
"""
)
cur.execute(
"""
create index isd_station_metadata_state on isd_station_metadata(state)
"""
)
cur.execute(
"""
create index isd_station_metadata_iecc_climate_zone on
isd_station_metadata(iecc_climate_zone)
"""
)
cur.execute(
"""
create index isd_station_metadata_iecc_moisture_regime on
isd_station_metadata(iecc_moisture_regime)
"""
)
cur.execute(
"""
create index isd_station_metadata_ba_climate_zone on
isd_station_metadata(ba_climate_zone)
"""
)
cur.execute(
"""
create index isd_station_metadata_ca_climate_zone on
isd_station_metadata(ca_climate_zone)
"""
)
cur.close()
conn.commit()
def _write_isd_file_metadata_table(conn, isd_file_metadata):
cur = conn.cursor()
rows = [
(metadata["usaf_id"], year, station_data["wban_id"])
for isd_station, metadata in sorted(isd_file_metadata.items())
for year, year_data in sorted(metadata["years"].items())
for station_data in year_data
]
cur.executemany(
"""
insert into isd_file_metadata(
usaf_id
, year
, wban_id
) values (?,?,?)
""",
rows,
)
cur.execute(
"""
create index isd_file_metadata_usaf_id on
isd_file_metadata(usaf_id)
"""
)
cur.execute(
"""
create index isd_file_metadata_year on
isd_file_metadata(year)
"""
)
cur.execute(
"""
create index isd_file_metadata_usaf_id_year on
isd_file_metadata(usaf_id, year)
"""
)
cur.execute(
"""
create index isd_file_metadata_wban_id on
isd_file_metadata(wban_id)
"""
)
cur.close()
conn.commit()
def _write_zcta_metadata_table(conn, zcta_metadata, geometry=False):
cur = conn.cursor()
rows = [
(
metadata["zcta"],
metadata["geometry"] if geometry else None,
metadata["latitude"],
metadata["longitude"],
metadata["state"],
metadata["iecc_climate_zone"],
metadata["iecc_moisture_regime"],
metadata["ba_climate_zone"],
metadata["ca_climate_zone"],
)
for zcta, metadata in sorted(zcta_metadata.items())
]
cur.executemany(
"""
insert into zcta_metadata(
zcta_id
, geometry
, latitude
, longitude
, state
, iecc_climate_zone
, iecc_moisture_regime
, ba_climate_zone
, ca_climate_zone
) values (?,?,?,?,?,?,?,?,?)
""",
rows,
)
cur.execute(
"""
create index zcta_metadata_zcta_id on zcta_metadata(zcta_id)
"""
)
cur.execute(
"""
create index zcta_metadata_state on zcta_metadata(state)
"""
)
cur.execute(
"""
create index zcta_metadata_iecc_climate_zone on zcta_metadata(iecc_climate_zone)
"""
)
cur.execute(
"""
create index zcta_metadata_iecc_moisture_regime on zcta_metadata(iecc_moisture_regime)
"""
)
cur.execute(
"""
create index zcta_metadata_ba_climate_zone on zcta_metadata(ba_climate_zone)
"""
)
cur.execute(
"""
create index zcta_metadata_ca_climate_zone on zcta_metadata(ca_climate_zone)
"""
)
cur.close()
conn.commit()
def _write_iecc_climate_zone_metadata_table(
conn, iecc_climate_zone_metadata, geometry=True
):
cur = conn.cursor()
rows = [
(metadata["iecc_climate_zone"], metadata["geometry"] if geometry else None)
for iecc_climate_zone, metadata in sorted(iecc_climate_zone_metadata.items())
]
cur.executemany(
"""
insert into iecc_climate_zone_metadata(
iecc_climate_zone
, geometry
) values (?,?)
""",
rows,
)
cur.execute(
"""
create index iecc_climate_zone_metadata_iecc_climate_zone on
iecc_climate_zone_metadata(iecc_climate_zone)
"""
)
cur.close()
conn.commit()
def _write_iecc_moisture_regime_metadata_table(
conn, iecc_moisture_regime_metadata, geometry=True
):
cur = conn.cursor()
rows = [
(metadata["iecc_moisture_regime"], metadata["geometry"] if geometry else None)
for iecc_moisture_regime, metadata in sorted(
iecc_moisture_regime_metadata.items()
)
]
cur.executemany(
"""
insert into iecc_moisture_regime_metadata(
iecc_moisture_regime
, geometry
) values (?,?)
""",
rows,
)
cur.execute(
"""
create index iecc_moisture_regime_metadata_iecc_moisture_regime on
iecc_moisture_regime_metadata(iecc_moisture_regime)
"""
)
cur.close()
conn.commit()
def _write_ba_climate_zone_metadata_table(
conn, ba_climate_zone_metadata, geometry=True
):
cur = conn.cursor()
rows = [
(metadata["ba_climate_zone"], metadata["geometry"] if geometry else None)
for ba_climate_zone, metadata in sorted(ba_climate_zone_metadata.items())
]
cur.executemany(
"""
insert into ba_climate_zone_metadata(
ba_climate_zone
, geometry
) values (?,?)
""",
rows,
)
cur.execute(
"""
create index ba_climate_zone_metadata_ba_climate_zone on
ba_climate_zone_metadata(ba_climate_zone)
"""
)
cur.close()
conn.commit()
def _write_ca_climate_zone_metadata_table(
conn, ca_climate_zone_metadata, geometry=True
):
cur = conn.cursor()
rows = [
(
metadata["ca_climate_zone"],
metadata["name"],
metadata["geometry"] if geometry else None,
)
for ca_climate_zone, metadata in sorted(ca_climate_zone_metadata.items())
]
cur.executemany(
"""
insert into ca_climate_zone_metadata(
ca_climate_zone
, name
, geometry
) values (?,?,?)
""",
rows,
)
cur.execute(
"""
create index ca_climate_zone_metadata_ca_climate_zone on
ca_climate_zone_metadata(ca_climate_zone)
"""
)
cur.close()
conn.commit()
def _write_tmy3_station_metadata_table(conn, tmy3_station_metadata):
cur = conn.cursor()
rows = [
(metadata["usaf_id"], metadata["name"], metadata["state"], metadata["class"])
for tmy3_station, metadata in sorted(tmy3_station_metadata.items())
]
cur.executemany(
"""
insert into tmy3_station_metadata(
usaf_id
, name
, state
, class
) values (?,?,?,?)
""",
rows,
)
cur.execute(
"""
create index tmy3_station_metadata_usaf_id on
tmy3_station_metadata(usaf_id)
"""
)
cur.close()
conn.commit()
def _write_cz2010_station_metadata_table(conn, cz2010_station_metadata):
cur = conn.cursor()
rows = [
(metadata["usaf_id"],)
for cz2010_station, metadata in sorted(cz2010_station_metadata.items())
]
cur.executemany(
"""
insert into cz2010_station_metadata(
usaf_id
) values (?)
""",
rows,
)
cur.execute(
"""
create index cz2010_station_metadata_usaf_id on
cz2010_station_metadata(usaf_id)
"""
)
cur.close()
conn.commit()
def build_metadata_db(
    zcta_geometry=False,
    iecc_climate_zone_geometry=True,
    iecc_moisture_regime_geometry=True,
    ba_climate_zone_geometry=True,
    ca_climate_zone_geometry=True,
):
    """ Build database of metadata from primary sources.

    Downloads primary sources, clears existing DB, and rebuilds from scratch.
    The temporary download directory is always removed, even if a build step
    fails partway through.

    Parameters
    ----------
    zcta_geometry : bool, optional
        Whether or not to include ZCTA geometry in database.
    iecc_climate_zone_geometry : bool, optional
        Whether or not to include IECC Climate Zone geometry in database.
    iecc_moisture_regime_geometry : bool, optional
        Whether or not to include IECC Moisture Regime geometry in database.
    ba_climate_zone_geometry : bool, optional
        Whether or not to include Building America Climate Zone geometry in database.
    ca_climate_zone_geometry : bool, optional
        Whether or not to include California Building Climate Zone Area geometry in database.
    """
    # Fail fast with actionable messages if optional build-time deps are missing.
    try:
        import shapely  # noqa: F401
    except ImportError:
        raise ImportError("Loading polygons requires shapely.")
    try:
        from bs4 import BeautifulSoup  # noqa: F401
    except ImportError:
        raise ImportError("Scraping TMY3 station data requires beautifulsoup4.")
    try:
        import pyproj  # noqa: F401
    except ImportError:
        raise ImportError("Computing distances requires pyproj.")
    try:
        import simplejson  # noqa: F401
    except ImportError:
        raise ImportError("Writing geojson requires simplejson.")

    download_path = _download_primary_sources()
    try:
        conn = metadata_db_connection_proxy.reset_database()

        # Load data into memory
        print("Loading ZCTAs")
        zcta_metadata = _load_zcta_metadata(download_path)

        print("Loading counties")
        county_metadata = _load_county_metadata(download_path)

        print("Merging county climate zones")
        (
            iecc_climate_zone_metadata,
            iecc_moisture_regime_metadata,
            ba_climate_zone_metadata,
        ) = _create_merged_climate_zones_metadata(county_metadata)

        print("Loading CA climate zones")
        ca_climate_zone_metadata = _load_CA_climate_zone_metadata(download_path)

        print("Loading ISD station metadata")
        isd_station_metadata = _load_isd_station_metadata(download_path)

        print("Loading ISD station file metadata")
        isd_file_metadata = _load_isd_file_metadata(download_path, isd_station_metadata)

        print("Loading TMY3 station metadata")
        tmy3_station_metadata = _load_tmy3_station_metadata(download_path)

        print("Loading CZ2010 station metadata")
        cz2010_station_metadata = _load_cz2010_station_metadata()

        # Augment data in memory
        print("Computing ISD station quality")
        # add rough station quality to station metadata
        # (all months in last 5 years have at least 600 points)
        _compute_isd_station_quality(isd_station_metadata, isd_file_metadata)

        print("Mapping ZCTAs to climate zones")
        # add county and ca climate zone mappings
        _map_zcta_to_climate_zones(
            zcta_metadata,
            iecc_climate_zone_metadata,
            iecc_moisture_regime_metadata,
            ba_climate_zone_metadata,
            ca_climate_zone_metadata,
        )

        print("Mapping ISD stations to climate zones")
        # add county and ca climate zone mappings
        _map_isd_station_to_climate_zones(
            isd_station_metadata,
            iecc_climate_zone_metadata,
            iecc_moisture_regime_metadata,
            ba_climate_zone_metadata,
            ca_climate_zone_metadata,
        )

        # Write tables
        print("Creating table structures")
        _create_table_structures(conn)

        print("Writing ZCTA data")
        _write_zcta_metadata_table(conn, zcta_metadata, geometry=zcta_geometry)

        print("Writing IECC climate zone data")
        _write_iecc_climate_zone_metadata_table(
            conn, iecc_climate_zone_metadata, geometry=iecc_climate_zone_geometry
        )

        print("Writing IECC moisture regime data")
        _write_iecc_moisture_regime_metadata_table(
            conn, iecc_moisture_regime_metadata, geometry=iecc_moisture_regime_geometry
        )

        print("Writing BA climate zone data")
        _write_ba_climate_zone_metadata_table(
            conn, ba_climate_zone_metadata, geometry=ba_climate_zone_geometry
        )

        print("Writing CA climate zone data")
        _write_ca_climate_zone_metadata_table(
            conn, ca_climate_zone_metadata, geometry=ca_climate_zone_geometry
        )

        print("Writing ISD station metadata")
        _write_isd_station_metadata_table(conn, isd_station_metadata)

        print("Writing ISD file metadata")
        _write_isd_file_metadata_table(conn, isd_file_metadata)

        print("Writing TMY3 station metadata")
        _write_tmy3_station_metadata_table(conn, tmy3_station_metadata)

        print("Writing CZ2010 station metadata")
        _write_cz2010_station_metadata_table(conn, cz2010_station_metadata)
    finally:
        # Fix: previously the temp download dir leaked if any step above
        # raised; always remove it so repeated runs don't fill the disk.
        print("Cleaning up...")
        shutil.rmtree(download_path)

    print("\u2728 Completed! \u2728")
def inspect_metadata_db():
    # Launch an interactive sqlite3 shell on the metadata database for manual
    # inspection; blocks until the shell exits. Requires the `sqlite3` CLI
    # binary on PATH.
    subprocess.call(["sqlite3", metadata_db_connection_proxy.db_path])
|
openeemeter/eeweather | eeweather/database.py | build_metadata_db | python | def build_metadata_db(
zcta_geometry=False,
iecc_climate_zone_geometry=True,
iecc_moisture_regime_geometry=True,
ba_climate_zone_geometry=True,
ca_climate_zone_geometry=True,
):
try:
import shapely
except ImportError:
raise ImportError("Loading polygons requires shapely.")
try:
from bs4 import BeautifulSoup
except ImportError:
raise ImportError("Scraping TMY3 station data requires beautifulsoup4.")
try:
import pyproj
except ImportError:
raise ImportError("Computing distances requires pyproj.")
try:
import simplejson
except ImportError:
raise ImportError("Writing geojson requires simplejson.")
download_path = _download_primary_sources()
conn = metadata_db_connection_proxy.reset_database()
# Load data into memory
print("Loading ZCTAs")
zcta_metadata = _load_zcta_metadata(download_path)
print("Loading counties")
county_metadata = _load_county_metadata(download_path)
print("Merging county climate zones")
(
iecc_climate_zone_metadata,
iecc_moisture_regime_metadata,
ba_climate_zone_metadata,
) = _create_merged_climate_zones_metadata(county_metadata)
print("Loading CA climate zones")
ca_climate_zone_metadata = _load_CA_climate_zone_metadata(download_path)
print("Loading ISD station metadata")
isd_station_metadata = _load_isd_station_metadata(download_path)
print("Loading ISD station file metadata")
isd_file_metadata = _load_isd_file_metadata(download_path, isd_station_metadata)
print("Loading TMY3 station metadata")
tmy3_station_metadata = _load_tmy3_station_metadata(download_path)
print("Loading CZ2010 station metadata")
cz2010_station_metadata = _load_cz2010_station_metadata()
# Augment data in memory
print("Computing ISD station quality")
# add rough station quality to station metadata
# (all months in last 5 years have at least 600 points)
_compute_isd_station_quality(isd_station_metadata, isd_file_metadata)
print("Mapping ZCTAs to climate zones")
# add county and ca climate zone mappings
_map_zcta_to_climate_zones(
zcta_metadata,
iecc_climate_zone_metadata,
iecc_moisture_regime_metadata,
ba_climate_zone_metadata,
ca_climate_zone_metadata,
)
print("Mapping ISD stations to climate zones")
# add county and ca climate zone mappings
_map_isd_station_to_climate_zones(
isd_station_metadata,
iecc_climate_zone_metadata,
iecc_moisture_regime_metadata,
ba_climate_zone_metadata,
ca_climate_zone_metadata,
)
# Write tables
print("Creating table structures")
_create_table_structures(conn)
print("Writing ZCTA data")
_write_zcta_metadata_table(conn, zcta_metadata, geometry=zcta_geometry)
print("Writing IECC climate zone data")
_write_iecc_climate_zone_metadata_table(
conn, iecc_climate_zone_metadata, geometry=iecc_climate_zone_geometry
)
print("Writing IECC moisture regime data")
_write_iecc_moisture_regime_metadata_table(
conn, iecc_moisture_regime_metadata, geometry=iecc_moisture_regime_geometry
)
print("Writing BA climate zone data")
_write_ba_climate_zone_metadata_table(
conn, ba_climate_zone_metadata, geometry=ba_climate_zone_geometry
)
print("Writing CA climate zone data")
_write_ca_climate_zone_metadata_table(
conn, ca_climate_zone_metadata, geometry=ca_climate_zone_geometry
)
print("Writing ISD station metadata")
_write_isd_station_metadata_table(conn, isd_station_metadata)
print("Writing ISD file metadata")
_write_isd_file_metadata_table(conn, isd_file_metadata)
print("Writing TMY3 station metadata")
_write_tmy3_station_metadata_table(conn, tmy3_station_metadata)
print("Writing CZ2010 station metadata")
_write_cz2010_station_metadata_table(conn, cz2010_station_metadata)
print("Cleaning up...")
shutil.rmtree(download_path)
print("\u2728 Completed! \u2728") | Build database of metadata from primary sources.
Downloads primary sources, clears existing DB, and rebuilds from scratch.
Parameters
----------
zcta_geometry : bool, optional
Whether or not to include ZCTA geometry in database.
iecc_climate_zone_geometry : bool, optional
Whether or not to include IECC Climate Zone geometry in database.
iecc_moisture_regime_geometry : bool, optional
Whether or not to include IECC Moisture Regime geometry in database.
ba_climate_zone_geometry : bool, optional
Whether or not to include Building America Climate Zone geometry in database.
ca_climate_zone_geometry : bool, optional
Whether or not to include California Building Climate Zone Area geometry in database. | train | https://github.com/openeemeter/eeweather/blob/d32b7369b26edfa3ee431c60457afeb0593123a7/eeweather/database.py#L1130-L1275 | [
"def _download_primary_sources():\n root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n scripts_path = os.path.join(root_dir, \"scripts\", \"download_primary_sources.sh\")\n\n download_path = tempfile.mkdtemp()\n subprocess.call([scripts_path, download_path])\n return download_pa... | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2018 Open Energy Efficiency, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import defaultdict
from datetime import datetime, timedelta
import json
import logging
import os
import shutil
import subprocess
import tempfile
import pandas as pd
import numpy as np
from .connections import noaa_ftp_connection_proxy, metadata_db_connection_proxy
logger = logging.getLogger(__name__)
__all__ = ("build_metadata_db", "inspect_metadata_db")
CZ2010_LIST = [
"725958",
"725945",
"723840",
"724837",
"724800",
"725845",
"747188",
"722880",
"723926",
"722926",
"722927",
"746120",
"722899",
"724936",
"725946",
"723815",
"723810",
"722810",
"725940",
"723890",
"722976",
"724935",
"747185",
"722909",
"723826",
"722956",
"725847",
"723816",
"747020",
"724927",
"722895",
"722970",
"722975",
"722874",
"722950",
"724815",
"724926",
"722953",
"725955",
"724915",
"725957",
"724955",
"723805",
"724930",
"723927",
"722868",
"747187",
"723820",
"724937",
"723965",
"723910",
"723895",
"725910",
"725920",
"722860",
"722869",
"724830",
"724839",
"724917",
"724938",
"722925",
"722907",
"722900",
"722903",
"722906",
"724940",
"724945",
"724946",
"722897",
"722910",
"723830",
"722977",
"723925",
"723940",
"722885",
"724957",
"724920",
"722955",
"745160",
"725846",
"690150",
"725905",
"722886",
"723930",
"723896",
"724838",
]
class PrettyFloat(float):
def __repr__(self):
return "%.7g" % self
def pretty_floats(obj):
if isinstance(obj, float):
return PrettyFloat(round(obj, 4))
elif isinstance(obj, dict):
return dict((k, pretty_floats(v)) for k, v in obj.items())
elif isinstance(obj, (list, tuple)):
return list(map(pretty_floats, obj))
return obj
def to_geojson(polygon):
import simplejson
from shapely.geometry import mapping
return simplejson.dumps(pretty_floats(mapping(polygon)), separators=(",", ":"))
def _download_primary_sources():
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
scripts_path = os.path.join(root_dir, "scripts", "download_primary_sources.sh")
download_path = tempfile.mkdtemp()
subprocess.call([scripts_path, download_path])
return download_path
def _load_isd_station_metadata(download_path):
""" Collect metadata for US isd stations.
"""
from shapely.geometry import Point
# load ISD history which contains metadata
isd_history = pd.read_csv(
os.path.join(download_path, "isd-history.csv"),
dtype=str,
parse_dates=["BEGIN", "END"],
)
hasGEO = (
isd_history.LAT.notnull() & isd_history.LON.notnull() & (isd_history.LAT != 0)
)
isUS = (
((isd_history.CTRY == "US") & (isd_history.STATE.notnull()))
# AQ = American Samoa, GQ = Guam, RQ = Peurto Rico, VQ = Virgin Islands
| (isd_history.CTRY.str[1] == "Q")
)
hasUSAF = isd_history.USAF != "999999"
metadata = {}
for usaf_station, group in isd_history[hasGEO & isUS & hasUSAF].groupby("USAF"):
# find most recent
recent = group.loc[group.END.idxmax()]
wban_stations = list(group.WBAN)
metadata[usaf_station] = {
"usaf_id": usaf_station,
"wban_ids": wban_stations,
"recent_wban_id": recent.WBAN,
"name": recent["STATION NAME"],
"icao_code": recent.ICAO,
"latitude": recent.LAT if recent.LAT not in ("+00.000",) else None,
"longitude": recent.LON if recent.LON not in ("+000.000",) else None,
"point": Point(float(recent.LON), float(recent.LAT)),
"elevation": recent["ELEV(M)"]
if not str(float(recent["ELEV(M)"])).startswith("-999")
else None,
"state": recent.STATE,
}
return metadata
def _load_isd_file_metadata(download_path, isd_station_metadata):
""" Collect data counts for isd files.
"""
isd_inventory = pd.read_csv(
os.path.join(download_path, "isd-inventory.csv"), dtype=str
)
# filter to stations with metadata
station_keep = [usaf in isd_station_metadata for usaf in isd_inventory.USAF]
isd_inventory = isd_inventory[station_keep]
# filter by year
year_keep = isd_inventory.YEAR > "2005"
isd_inventory = isd_inventory[year_keep]
metadata = {}
for (usaf_station, year), group in isd_inventory.groupby(["USAF", "YEAR"]):
if usaf_station not in metadata:
metadata[usaf_station] = {"usaf_id": usaf_station, "years": {}}
metadata[usaf_station]["years"][year] = [
{
"wban_id": row.WBAN,
"counts": [
row.JAN,
row.FEB,
row.MAR,
row.APR,
row.MAY,
row.JUN,
row.JUL,
row.AUG,
row.SEP,
row.OCT,
row.NOV,
row.DEC,
],
}
for i, row in group.iterrows()
]
return metadata
def _compute_isd_station_quality(
isd_station_metadata,
isd_file_metadata,
end_year=None,
years_back=None,
quality_func=None,
):
if end_year is None:
end_year = datetime.now().year - 1 # last full year
if years_back is None:
years_back = 5
if quality_func is None:
def quality_func(values):
minimum = values.min()
if minimum > 24 * 25:
return "high"
elif minimum > 24 * 15:
return "medium"
else:
return "low"
# e.g., if end_year == 2017, year_range = ["2013", "2014", ..., "2017"]
year_range = set([str(y) for y in range(end_year - (years_back - 1), end_year + 1)])
def _compute_station_quality(usaf_id):
years_data = isd_file_metadata.get(usaf_id, {}).get("years", {})
if not all([year in years_data for year in year_range]):
return quality_func(np.repeat(0, 60))
counts = defaultdict(lambda: 0)
for y, year in enumerate(year_range):
for station in years_data[year]:
for m, month_counts in enumerate(station["counts"]):
counts[y * 12 + m] += int(month_counts)
return quality_func(np.array(list(counts.values())))
# figure out counts for years of interest
for usaf_id, metadata in isd_station_metadata.items():
metadata["quality"] = _compute_station_quality(usaf_id)
def _load_zcta_metadata(download_path):
from shapely.geometry import shape
# load zcta geojson
geojson_path = os.path.join(download_path, "cb_2016_us_zcta510_500k.json")
with open(geojson_path, "r") as f:
geojson = json.load(f)
# load ZIP code prefixes by state
zipcode_prefixes_path = os.path.join(download_path, "zipcode_prefixes.json")
with open(zipcode_prefixes_path, "r") as f:
zipcode_prefixes = json.load(f)
prefix_to_zipcode = {
zipcode_prefix: state
for state, zipcode_prefix_list in zipcode_prefixes.items()
for zipcode_prefix in zipcode_prefix_list
}
def _get_state(zcta):
prefix = zcta[:3]
return prefix_to_zipcode.get(prefix)
metadata = {}
for feature in geojson["features"]:
zcta = feature["properties"]["GEOID10"]
geometry = feature["geometry"]
polygon = shape(geometry)
centroid = polygon.centroid
state = _get_state(zcta)
metadata[zcta] = {
"zcta": zcta,
"polygon": polygon,
"geometry": to_geojson(polygon),
"centroid": centroid,
"latitude": centroid.coords[0][1],
"longitude": centroid.coords[0][0],
"state": state,
}
return metadata
def _load_county_metadata(download_path):
from shapely.geometry import shape
# load county geojson
geojson_path = os.path.join(download_path, "cb_2016_us_county_500k.json")
with open(geojson_path, "r") as f:
geojson = json.load(f)
metadata = {}
for feature in geojson["features"]:
county = feature["properties"]["GEOID"]
geometry = feature["geometry"]
polygon = shape(geometry)
centroid = polygon.centroid
metadata[county] = {
"county": county,
"polygon": polygon,
"geometry": to_geojson(polygon),
"centroid": centroid,
"latitude": centroid.coords[0][1],
"longitude": centroid.coords[0][0],
}
# load county climate zones
county_climate_zones = pd.read_csv(
os.path.join(download_path, "climate_zones.csv"),
dtype=str,
usecols=[
"State FIPS",
"County FIPS",
"IECC Climate Zone",
"IECC Moisture Regime",
"BA Climate Zone",
"County Name",
],
)
for i, row in county_climate_zones.iterrows():
county = row["State FIPS"] + row["County FIPS"]
if county not in metadata:
logger.warn(
"Could not find geometry for county {}, skipping.".format(county)
)
continue
metadata[county].update(
{
"name": row["County Name"],
"iecc_climate_zone": row["IECC Climate Zone"],
"iecc_moisture_regime": (
row["IECC Moisture Regime"]
if not pd.isnull(row["IECC Moisture Regime"])
else None
),
"ba_climate_zone": row["BA Climate Zone"],
}
)
return metadata
def _load_CA_climate_zone_metadata(download_path):
from shapely.geometry import shape, mapping
ca_climate_zone_names = {
"01": "Arcata",
"02": "Santa Rosa",
"03": "Oakland",
"04": "San Jose-Reid",
"05": "Santa Maria",
"06": "Torrance",
"07": "San Diego-Lindbergh",
"08": "Fullerton",
"09": "Burbank-Glendale",
"10": "Riverside",
"11": "Red Bluff",
"12": "Sacramento",
"13": "Fresno",
"14": "Palmdale",
"15": "Palm Spring-Intl",
"16": "Blue Canyon",
}
geojson_path = os.path.join(
download_path, "CA_Building_Standards_Climate_Zones.json"
)
with open(geojson_path, "r") as f:
geojson = json.load(f)
metadata = {}
for feature in geojson["features"]:
zone = "{:02d}".format(int(feature["properties"]["Zone"]))
geometry = feature["geometry"]
polygon = shape(geometry)
metadata[zone] = {
"ca_climate_zone": "CA_{}".format(zone),
"name": ca_climate_zone_names[zone],
"polygon": polygon,
"geometry": to_geojson(polygon),
}
return metadata
def _load_tmy3_station_metadata(download_path):
from bs4 import BeautifulSoup
path = os.path.join(download_path, "tmy3-stations.html")
with open(path, "r") as f:
soup = BeautifulSoup(f.read(), "html.parser")
tmy3_station_elements = soup.select("td .hide")
metadata = {}
for station_el in tmy3_station_elements:
station_name_el = station_el.findNext("td").findNext("td")
station_class_el = station_name_el.findNext("td")
usaf_id = station_el.text.strip()
name = (
"".join(station_name_el.text.split(",")[:-1])
.replace("\n", "")
.replace("\t", "")
.strip()
)
metadata[usaf_id] = {
"usaf_id": usaf_id,
"name": name,
"state": station_name_el.text.split(",")[-1].strip(),
"class": station_class_el.text.split()[1].strip(),
}
return metadata
def _load_cz2010_station_metadata():
return {usaf_id: {"usaf_id": usaf_id} for usaf_id in CZ2010_LIST}
def _create_merged_climate_zones_metadata(county_metadata):
from shapely.ops import cascaded_union
iecc_climate_zone_polygons = defaultdict(list)
iecc_moisture_regime_polygons = defaultdict(list)
ba_climate_zone_polygons = defaultdict(list)
for county in county_metadata.values():
polygon = county["polygon"]
iecc_climate_zone = county.get("iecc_climate_zone")
iecc_moisture_regime = county.get("iecc_moisture_regime")
ba_climate_zone = county.get("ba_climate_zone")
if iecc_climate_zone is not None:
iecc_climate_zone_polygons[iecc_climate_zone].append(polygon)
if iecc_moisture_regime is not None:
iecc_moisture_regime_polygons[iecc_moisture_regime].append(polygon)
if ba_climate_zone is not None:
ba_climate_zone_polygons[ba_climate_zone].append(polygon)
iecc_climate_zone_metadata = {}
for iecc_climate_zone, polygons in iecc_climate_zone_polygons.items():
polygon = cascaded_union(polygons)
polygon = polygon.simplify(0.01)
iecc_climate_zone_metadata[iecc_climate_zone] = {
"iecc_climate_zone": iecc_climate_zone,
"polygon": polygon,
"geometry": to_geojson(polygon),
}
iecc_moisture_regime_metadata = {}
for iecc_moisture_regime, polygons in iecc_moisture_regime_polygons.items():
polygon = cascaded_union(polygons)
polygon = polygon.simplify(0.01)
iecc_moisture_regime_metadata[iecc_moisture_regime] = {
"iecc_moisture_regime": iecc_moisture_regime,
"polygon": polygon,
"geometry": to_geojson(polygon),
}
ba_climate_zone_metadata = {}
for ba_climate_zone, polygons in ba_climate_zone_polygons.items():
polygon = cascaded_union(polygons)
polygon = polygon.simplify(0.01)
ba_climate_zone_metadata[ba_climate_zone] = {
"ba_climate_zone": ba_climate_zone,
"polygon": polygon,
"geometry": to_geojson(polygon),
}
return (
iecc_climate_zone_metadata,
iecc_moisture_regime_metadata,
ba_climate_zone_metadata,
)
def _compute_containment(
point_metadata, point_id_field, polygon_metadata, polygon_metadata_field
):
from shapely.vectorized import contains
points, lats, lons = zip(
*[
(point, point["latitude"], point["longitude"])
for point in point_metadata.values()
]
)
for i, polygon in enumerate(polygon_metadata.values()):
containment = contains(polygon["polygon"], lons, lats)
for point, c in zip(points, containment):
if c:
point[polygon_metadata_field] = polygon[polygon_metadata_field]
# fill in with None
for point in point_metadata.values():
point[polygon_metadata_field] = point.get(polygon_metadata_field, None)
def _map_zcta_to_climate_zones(
zcta_metadata,
iecc_climate_zone_metadata,
iecc_moisture_regime_metadata,
ba_climate_zone_metadata,
ca_climate_zone_metadata,
):
_compute_containment(
zcta_metadata, "zcta", iecc_climate_zone_metadata, "iecc_climate_zone"
)
_compute_containment(
zcta_metadata, "zcta", iecc_moisture_regime_metadata, "iecc_moisture_regime"
)
_compute_containment(
zcta_metadata, "zcta", ba_climate_zone_metadata, "ba_climate_zone"
)
_compute_containment(
zcta_metadata, "zcta", ca_climate_zone_metadata, "ca_climate_zone"
)
def _map_isd_station_to_climate_zones(
isd_station_metadata,
iecc_climate_zone_metadata,
iecc_moisture_regime_metadata,
ba_climate_zone_metadata,
ca_climate_zone_metadata,
):
_compute_containment(
isd_station_metadata, "usaf_id", iecc_climate_zone_metadata, "iecc_climate_zone"
)
_compute_containment(
isd_station_metadata,
"usaf_id",
iecc_moisture_regime_metadata,
"iecc_moisture_regime",
)
_compute_containment(
isd_station_metadata, "usaf_id", ba_climate_zone_metadata, "ba_climate_zone"
)
_compute_containment(
isd_station_metadata, "usaf_id", ca_climate_zone_metadata, "ca_climate_zone"
)
def _find_zcta_closest_isd_stations(zcta_metadata, isd_station_metadata, limit=None):
if limit is None:
limit = 10
import pyproj
geod = pyproj.Geod(ellps="WGS84")
isd_usaf_ids, isd_lats, isd_lngs = zip(
*[
(
isd_station["usaf_id"],
float(isd_station["latitude"]),
float(isd_station["longitude"]),
)
for isd_station in isd_station_metadata.values()
]
)
isd_lats = np.array(isd_lats)
isd_lngs = np.array(isd_lngs)
for zcta in zcta_metadata.values():
zcta_lats = np.tile(zcta["latitude"], isd_lats.shape)
zcta_lngs = np.tile(zcta["longitude"], isd_lngs.shape)
dists = geod.inv(zcta_lngs, zcta_lats, isd_lngs, isd_lats)[2]
sorted_dists = np.argsort(dists)[:limit]
closest_isd_stations = []
for i, idx in enumerate(sorted_dists):
usaf_id = isd_usaf_ids[idx]
isd_station = isd_station_metadata[usaf_id]
closest_isd_stations.append(
{
"usaf_id": usaf_id,
"distance_meters": int(round(dists[idx])),
"rank": i + 1,
"iecc_climate_zone_match": (
zcta.get("iecc_climate_zone")
== isd_station.get("iecc_climate_zone")
),
"iecc_moisture_regime_match": (
zcta.get("iecc_moisture_regime")
== isd_station.get("iecc_moisture_regime")
),
"ba_climate_zone_match": (
zcta.get("ba_climate_zone")
== isd_station.get("ba_climate_zone")
),
"ca_climate_zone_match": (
zcta.get("ca_climate_zone")
== isd_station.get("ca_climate_zone")
),
}
)
zcta["closest_isd_stations"] = closest_isd_stations
def _create_table_structures(conn):
cur = conn.cursor()
cur.execute(
"""
create table isd_station_metadata (
usaf_id text not null
, wban_ids text not null
, recent_wban_id text not null
, name text not null
, icao_code text
, latitude text
, longitude text
, elevation text
, state text
, quality text default 'low'
, iecc_climate_zone text
, iecc_moisture_regime text
, ba_climate_zone text
, ca_climate_zone text
)
"""
)
cur.execute(
"""
create table isd_file_metadata (
usaf_id text not null
, year text not null
, wban_id text not null
)
"""
)
cur.execute(
"""
create table zcta_metadata (
zcta_id text not null
, geometry text
, latitude text not null
, longitude text not null
, state text
, iecc_climate_zone text
, iecc_moisture_regime text
, ba_climate_zone text
, ca_climate_zone text
)
"""
)
cur.execute(
"""
create table iecc_climate_zone_metadata (
iecc_climate_zone text not null
, geometry text
)
"""
)
cur.execute(
"""
create table iecc_moisture_regime_metadata (
iecc_moisture_regime text not null
, geometry text
)
"""
)
cur.execute(
"""
create table ba_climate_zone_metadata (
ba_climate_zone text not null
, geometry text
)
"""
)
cur.execute(
"""
create table ca_climate_zone_metadata (
ca_climate_zone text not null
, name text not null
, geometry text
)
"""
)
cur.execute(
"""
create table tmy3_station_metadata (
usaf_id text not null
, name text not null
, state text not null
, class text not null
)
"""
)
cur.execute(
"""
create table cz2010_station_metadata (
usaf_id text not null
)
"""
)
def _write_isd_station_metadata_table(conn, isd_station_metadata):
cur = conn.cursor()
rows = [
(
metadata["usaf_id"],
",".join(metadata["wban_ids"]),
metadata["recent_wban_id"],
metadata["name"],
metadata["icao_code"],
metadata["latitude"],
metadata["longitude"],
metadata["elevation"],
metadata["state"],
metadata["quality"],
metadata["iecc_climate_zone"],
metadata["iecc_moisture_regime"],
metadata["ba_climate_zone"],
metadata["ca_climate_zone"],
)
for station, metadata in sorted(isd_station_metadata.items())
]
cur.executemany(
"""
insert into isd_station_metadata(
usaf_id
, wban_ids
, recent_wban_id
, name
, icao_code
, latitude
, longitude
, elevation
, state
, quality
, iecc_climate_zone
, iecc_moisture_regime
, ba_climate_zone
, ca_climate_zone
) values (?,?,?,?,?,?,?,?,?,?,?,?,?,?)
""",
rows,
)
cur.execute(
"""
create index isd_station_metadata_usaf_id on isd_station_metadata(usaf_id)
"""
)
cur.execute(
"""
create index isd_station_metadata_state on isd_station_metadata(state)
"""
)
cur.execute(
"""
create index isd_station_metadata_iecc_climate_zone on
isd_station_metadata(iecc_climate_zone)
"""
)
cur.execute(
"""
create index isd_station_metadata_iecc_moisture_regime on
isd_station_metadata(iecc_moisture_regime)
"""
)
cur.execute(
"""
create index isd_station_metadata_ba_climate_zone on
isd_station_metadata(ba_climate_zone)
"""
)
cur.execute(
"""
create index isd_station_metadata_ca_climate_zone on
isd_station_metadata(ca_climate_zone)
"""
)
cur.close()
conn.commit()
def _write_isd_file_metadata_table(conn, isd_file_metadata):
cur = conn.cursor()
rows = [
(metadata["usaf_id"], year, station_data["wban_id"])
for isd_station, metadata in sorted(isd_file_metadata.items())
for year, year_data in sorted(metadata["years"].items())
for station_data in year_data
]
cur.executemany(
"""
insert into isd_file_metadata(
usaf_id
, year
, wban_id
) values (?,?,?)
""",
rows,
)
cur.execute(
"""
create index isd_file_metadata_usaf_id on
isd_file_metadata(usaf_id)
"""
)
cur.execute(
"""
create index isd_file_metadata_year on
isd_file_metadata(year)
"""
)
cur.execute(
"""
create index isd_file_metadata_usaf_id_year on
isd_file_metadata(usaf_id, year)
"""
)
cur.execute(
"""
create index isd_file_metadata_wban_id on
isd_file_metadata(wban_id)
"""
)
cur.close()
conn.commit()
def _write_zcta_metadata_table(conn, zcta_metadata, geometry=False):
cur = conn.cursor()
rows = [
(
metadata["zcta"],
metadata["geometry"] if geometry else None,
metadata["latitude"],
metadata["longitude"],
metadata["state"],
metadata["iecc_climate_zone"],
metadata["iecc_moisture_regime"],
metadata["ba_climate_zone"],
metadata["ca_climate_zone"],
)
for zcta, metadata in sorted(zcta_metadata.items())
]
cur.executemany(
"""
insert into zcta_metadata(
zcta_id
, geometry
, latitude
, longitude
, state
, iecc_climate_zone
, iecc_moisture_regime
, ba_climate_zone
, ca_climate_zone
) values (?,?,?,?,?,?,?,?,?)
""",
rows,
)
cur.execute(
"""
create index zcta_metadata_zcta_id on zcta_metadata(zcta_id)
"""
)
cur.execute(
"""
create index zcta_metadata_state on zcta_metadata(state)
"""
)
cur.execute(
"""
create index zcta_metadata_iecc_climate_zone on zcta_metadata(iecc_climate_zone)
"""
)
cur.execute(
"""
create index zcta_metadata_iecc_moisture_regime on zcta_metadata(iecc_moisture_regime)
"""
)
cur.execute(
"""
create index zcta_metadata_ba_climate_zone on zcta_metadata(ba_climate_zone)
"""
)
cur.execute(
"""
create index zcta_metadata_ca_climate_zone on zcta_metadata(ca_climate_zone)
"""
)
cur.close()
conn.commit()
def _write_iecc_climate_zone_metadata_table(
conn, iecc_climate_zone_metadata, geometry=True
):
cur = conn.cursor()
rows = [
(metadata["iecc_climate_zone"], metadata["geometry"] if geometry else None)
for iecc_climate_zone, metadata in sorted(iecc_climate_zone_metadata.items())
]
cur.executemany(
"""
insert into iecc_climate_zone_metadata(
iecc_climate_zone
, geometry
) values (?,?)
""",
rows,
)
cur.execute(
"""
create index iecc_climate_zone_metadata_iecc_climate_zone on
iecc_climate_zone_metadata(iecc_climate_zone)
"""
)
cur.close()
conn.commit()
def _write_iecc_moisture_regime_metadata_table(
conn, iecc_moisture_regime_metadata, geometry=True
):
cur = conn.cursor()
rows = [
(metadata["iecc_moisture_regime"], metadata["geometry"] if geometry else None)
for iecc_moisture_regime, metadata in sorted(
iecc_moisture_regime_metadata.items()
)
]
cur.executemany(
"""
insert into iecc_moisture_regime_metadata(
iecc_moisture_regime
, geometry
) values (?,?)
""",
rows,
)
cur.execute(
"""
create index iecc_moisture_regime_metadata_iecc_moisture_regime on
iecc_moisture_regime_metadata(iecc_moisture_regime)
"""
)
cur.close()
conn.commit()
def _write_ba_climate_zone_metadata_table(
conn, ba_climate_zone_metadata, geometry=True
):
cur = conn.cursor()
rows = [
(metadata["ba_climate_zone"], metadata["geometry"] if geometry else None)
for ba_climate_zone, metadata in sorted(ba_climate_zone_metadata.items())
]
cur.executemany(
"""
insert into ba_climate_zone_metadata(
ba_climate_zone
, geometry
) values (?,?)
""",
rows,
)
cur.execute(
"""
create index ba_climate_zone_metadata_ba_climate_zone on
ba_climate_zone_metadata(ba_climate_zone)
"""
)
cur.close()
conn.commit()
def _write_ca_climate_zone_metadata_table(
conn, ca_climate_zone_metadata, geometry=True
):
cur = conn.cursor()
rows = [
(
metadata["ca_climate_zone"],
metadata["name"],
metadata["geometry"] if geometry else None,
)
for ca_climate_zone, metadata in sorted(ca_climate_zone_metadata.items())
]
cur.executemany(
"""
insert into ca_climate_zone_metadata(
ca_climate_zone
, name
, geometry
) values (?,?,?)
""",
rows,
)
cur.execute(
"""
create index ca_climate_zone_metadata_ca_climate_zone on
ca_climate_zone_metadata(ca_climate_zone)
"""
)
cur.close()
conn.commit()
def _write_tmy3_station_metadata_table(conn, tmy3_station_metadata):
cur = conn.cursor()
rows = [
(metadata["usaf_id"], metadata["name"], metadata["state"], metadata["class"])
for tmy3_station, metadata in sorted(tmy3_station_metadata.items())
]
cur.executemany(
"""
insert into tmy3_station_metadata(
usaf_id
, name
, state
, class
) values (?,?,?,?)
""",
rows,
)
cur.execute(
"""
create index tmy3_station_metadata_usaf_id on
tmy3_station_metadata(usaf_id)
"""
)
cur.close()
conn.commit()
def _write_cz2010_station_metadata_table(conn, cz2010_station_metadata):
cur = conn.cursor()
rows = [
(metadata["usaf_id"],)
for cz2010_station, metadata in sorted(cz2010_station_metadata.items())
]
cur.executemany(
"""
insert into cz2010_station_metadata(
usaf_id
) values (?)
""",
rows,
)
cur.execute(
"""
create index cz2010_station_metadata_usaf_id on
cz2010_station_metadata(usaf_id)
"""
)
cur.close()
conn.commit()
def inspect_metadata_db():
    """Open an interactive ``sqlite3`` shell on the metadata database.

    Convenience helper for manual inspection; requires the ``sqlite3``
    command-line tool on PATH and blocks until the shell exits.
    """
    subprocess.call(["sqlite3", metadata_db_connection_proxy.db_path])
|
openeemeter/eeweather | eeweather/stations.py | ISDStation.json | python | def json(self):
return {
"elevation": self.elevation,
"latitude": self.latitude,
"longitude": self.longitude,
"icao_code": self.icao_code,
"name": self.name,
"quality": self.quality,
"wban_ids": self.wban_ids,
"recent_wban_id": self.recent_wban_id,
"climate_zones": {
"iecc_climate_zone": self.iecc_climate_zone,
"iecc_moisture_regime": self.iecc_moisture_regime,
"ba_climate_zone": self.ba_climate_zone,
"ca_climate_zone": self.ca_climate_zone,
},
} | Return a JSON-serializeable object containing station metadata. | train | https://github.com/openeemeter/eeweather/blob/d32b7369b26edfa3ee431c60457afeb0593123a7/eeweather/stations.py#L1151-L1168 | null | class ISDStation(object):
""" A representation of an Integrated Surface Database weather station.
Contains data about a particular ISD station, as well as methods to pull
data for this station.
Parameters
----------
usaf_id : str
ISD station USAF ID
load_metatdata : bool, optional
Whether or not to auto-load metadata for this station
Attributes
----------
usaf_id : str
ISD station USAF ID
iecc_climate_zone : str
IECC Climate Zone
iecc_moisture_regime : str
IECC Moisture Regime
ba_climate_zone : str
Building America Climate Zone
ca_climate_zone : str
California Building Climate Zone
elevation : float
elevation of station
latitude : float
latitude of station
longitude : float
longitude of station
coords : tuple of (float, float)
lat/long coordinates of station
name : str
name of the station
quality : str
"high", "medium", "low"
wban_ids : list of str
list of WBAN IDs, or "99999" which have been used to identify the station.
recent_wban_id = None
WBAN ID most recently used to identify the station.
climate_zones = {}
dict of all climate zones.
"""
def __init__(self, usaf_id, load_metadata=True):
self.usaf_id = usaf_id
if load_metadata:
self._load_metadata()
else:
valid_usaf_id_or_raise(usaf_id)
self.iecc_climate_zone = None
self.iecc_moisture_regime = None
self.ba_climate_zone = None
self.ca_climate_zone = None
self.elevation = None
self.latitude = None
self.longitude = None
self.coords = None
self.name = None
self.quality = None
self.wban_ids = None
self.recent_wban_id = None
self.climate_zones = {}
def __str__(self):
return self.usaf_id
def __repr__(self):
return "ISDStation('{}')".format(self.usaf_id)
def _load_metadata(self):
metadata = get_isd_station_metadata(self.usaf_id)
def _float_or_none(field):
value = metadata.get(field)
return None if value is None else float(value)
self.iecc_climate_zone = metadata.get("iecc_climate_zone")
self.iecc_moisture_regime = metadata.get("iecc_moisture_regime")
self.ba_climate_zone = metadata.get("ba_climate_zone")
self.ca_climate_zone = metadata.get("ca_climate_zone")
self.icao_code = metadata.get("icao_code")
self.elevation = _float_or_none("elevation") # meters
self.latitude = _float_or_none("latitude")
self.longitude = _float_or_none("longitude")
self.coords = (self.latitude, self.longitude)
self.name = metadata.get("name")
self.quality = metadata.get("quality")
self.wban_ids = metadata.get("wban_ids", "").split(",")
self.recent_wban_id = metadata.get("recent_wban_id")
self.climate_zones = {
"iecc_climate_zone": metadata.get("iecc_climate_zone"),
"iecc_moisture_regime": metadata.get("iecc_moisture_regime"),
"ba_climate_zone": metadata.get("ba_climate_zone"),
"ca_climate_zone": metadata.get("ca_climate_zone"),
}
def get_isd_filenames(self, year=None, with_host=False):
""" Get filenames of raw ISD station data. """
return get_isd_filenames(self.usaf_id, year, with_host=with_host)
def get_gsod_filenames(self, year=None, with_host=False):
""" Get filenames of raw GSOD station data. """
return get_gsod_filenames(self.usaf_id, year, with_host=with_host)
def get_isd_file_metadata(self):
""" Get raw file metadata for the station. """
return get_isd_file_metadata(self.usaf_id)
# fetch raw data
def fetch_isd_raw_temp_data(self, year):
""" Pull raw ISD data for the given year directly from FTP. """
return fetch_isd_raw_temp_data(self.usaf_id, year)
def fetch_gsod_raw_temp_data(self, year):
""" Pull raw GSOD data for the given year directly from FTP. """
return fetch_gsod_raw_temp_data(self.usaf_id, year)
# fetch raw data then frequency-normalize
def fetch_isd_hourly_temp_data(self, year):
""" Pull raw ISD temperature data for the given year directly from FTP and resample to hourly time series. """
return fetch_isd_hourly_temp_data(self.usaf_id, year)
def fetch_isd_daily_temp_data(self, year):
""" Pull raw ISD temperature data for the given year directly from FTP and resample to daily time series. """
return fetch_isd_daily_temp_data(self.usaf_id, year)
def fetch_gsod_daily_temp_data(self, year):
""" Pull raw GSOD temperature data for the given year directly from FTP and resample to daily time series. """
return fetch_gsod_daily_temp_data(self.usaf_id, year)
def fetch_tmy3_hourly_temp_data(self):
""" Pull hourly TMY3 temperature hourly time series directly from NREL. """
return fetch_tmy3_hourly_temp_data(self.usaf_id)
def fetch_cz2010_hourly_temp_data(self):
""" Pull hourly CZ2010 temperature hourly time series from URL. """
return fetch_cz2010_hourly_temp_data(self.usaf_id)
# get key-value store key
def get_isd_hourly_temp_data_cache_key(self, year):
""" Get key used to cache resampled hourly ISD temperature data for the given year. """
return get_isd_hourly_temp_data_cache_key(self.usaf_id, year)
def get_isd_daily_temp_data_cache_key(self, year):
""" Get key used to cache resampled daily ISD temperature data for the given year. """
return get_isd_daily_temp_data_cache_key(self.usaf_id, year)
def get_gsod_daily_temp_data_cache_key(self, year):
""" Get key used to cache resampled daily GSOD temperature data for the given year. """
return get_gsod_daily_temp_data_cache_key(self.usaf_id, year)
def get_tmy3_hourly_temp_data_cache_key(self):
""" Get key used to cache TMY3 weather-normalized temperature data. """
return get_tmy3_hourly_temp_data_cache_key(self.usaf_id)
def get_cz2010_hourly_temp_data_cache_key(self):
""" Get key used to cache CZ2010 weather-normalized temperature data. """
return get_cz2010_hourly_temp_data_cache_key(self.usaf_id)
# is cached data expired? boolean. true if expired or not in cache
def cached_isd_hourly_temp_data_is_expired(self, year):
""" Return True if cache of resampled hourly ISD temperature data has expired or does not exist for the given year. """
return cached_isd_hourly_temp_data_is_expired(self.usaf_id, year)
def cached_isd_daily_temp_data_is_expired(self, year):
""" Return True if cache of resampled daily ISD temperature data has expired or does not exist for the given year. """
return cached_isd_daily_temp_data_is_expired(self.usaf_id, year)
def cached_gsod_daily_temp_data_is_expired(self, year):
""" Return True if cache of resampled daily GSOD temperature data has expired or does not exist for the given year. """
return cached_gsod_daily_temp_data_is_expired(self.usaf_id, year)
# check if data is available and delete data in the cache if it's expired
def validate_isd_hourly_temp_data_cache(self, year):
""" Delete cached resampled hourly ISD temperature data if it has expired for the given year. """
return validate_isd_hourly_temp_data_cache(self.usaf_id, year)
def validate_isd_daily_temp_data_cache(self, year):
""" Delete cached resampled daily ISD temperature data if it has expired for the given year. """
return validate_isd_daily_temp_data_cache(self.usaf_id, year)
def validate_gsod_daily_temp_data_cache(self, year):
""" Delete cached resampled daily GSOD temperature data if it has expired for the given year. """
return validate_gsod_daily_temp_data_cache(self.usaf_id, year)
def validate_tmy3_hourly_temp_data_cache(self):
""" Check if TMY3 data exists in cache. """
return validate_tmy3_hourly_temp_data_cache(self.usaf_id)
def validate_cz2010_hourly_temp_data_cache(self):
""" Check if CZ2010 data exists in cache. """
return validate_cz2010_hourly_temp_data_cache(self.usaf_id)
# pandas time series to json
def serialize_isd_hourly_temp_data(self, ts):
""" Serialize resampled hourly ISD pandas time series as JSON for caching. """
return serialize_isd_hourly_temp_data(ts)
def serialize_isd_daily_temp_data(self, ts):
""" Serialize resampled daily ISD pandas time series as JSON for caching. """
return serialize_isd_daily_temp_data(ts)
def serialize_gsod_daily_temp_data(self, ts):
""" Serialize resampled daily GSOD pandas time series as JSON for caching. """
return serialize_gsod_daily_temp_data(ts)
def serialize_tmy3_hourly_temp_data(self, ts):
""" Serialize hourly TMY3 pandas time series as JSON for caching. """
return serialize_tmy3_hourly_temp_data(ts)
def serialize_cz2010_hourly_temp_data(self, ts):
""" Serialize hourly CZ2010 pandas time series as JSON for caching. """
return serialize_cz2010_hourly_temp_data(ts)
# json to pandas time series
def deserialize_isd_hourly_temp_data(self, data):
""" Deserialize JSON representation of resampled hourly ISD into pandas time series. """
return deserialize_isd_hourly_temp_data(data)
def deserialize_isd_daily_temp_data(self, data):
""" Deserialize JSON representation of resampled daily ISD into pandas time series. """
return deserialize_isd_daily_temp_data(data)
def deserialize_gsod_daily_temp_data(self, data):
""" Deserialize JSON representation of resampled daily GSOD into pandas time series. """
return deserialize_gsod_daily_temp_data(data)
def deserialize_tmy3_hourly_temp_data(self, data):
""" Deserialize JSON representation of hourly TMY3 into pandas time series. """
return deserialize_isd_hourly_temp_data(data)
def deserialize_cz2010_hourly_temp_data(self, data):
""" Deserialize JSON representation of hourly CZ2010 into pandas time series. """
return deserialize_cz2010_hourly_temp_data(data)
# return pandas time series of data from cache
def read_isd_hourly_temp_data_from_cache(self, year):
""" Get cached version of resampled hourly ISD temperature data for given year. """
return read_isd_hourly_temp_data_from_cache(self.usaf_id, year)
def read_isd_daily_temp_data_from_cache(self, year):
""" Get cached version of resampled daily ISD temperature data for given year. """
return read_isd_daily_temp_data_from_cache(self.usaf_id, year)
def read_gsod_daily_temp_data_from_cache(self, year):
""" Get cached version of resampled daily GSOD temperature data for given year. """
return read_gsod_daily_temp_data_from_cache(self.usaf_id, year)
def read_tmy3_hourly_temp_data_from_cache(self):
""" Get cached version of hourly TMY3 temperature data. """
return read_tmy3_hourly_temp_data_from_cache(self.usaf_id)
def read_cz2010_hourly_temp_data_from_cache(self):
""" Get cached version of hourly TMY3 temperature data. """
return read_cz2010_hourly_temp_data_from_cache(self.usaf_id)
# write pandas time series of data to cache for a particular year
def write_isd_hourly_temp_data_to_cache(self, year, ts):
""" Write resampled hourly ISD temperature data to cache for given year. """
return write_isd_hourly_temp_data_to_cache(self.usaf_id, year, ts)
def write_isd_daily_temp_data_to_cache(self, year, ts):
""" Write resampled daily ISD temperature data to cache for given year. """
return write_isd_daily_temp_data_to_cache(self.usaf_id, year, ts)
def write_gsod_daily_temp_data_to_cache(self, year, ts):
""" Write resampled daily GSOD temperature data to cache for given year. """
return write_gsod_daily_temp_data_to_cache(self.usaf_id, year, ts)
def write_tmy3_hourly_temp_data_to_cache(self, ts):
""" Write hourly TMY3 temperature data to cache for given year. """
return write_tmy3_hourly_temp_data_to_cache(self.usaf_id, ts)
def write_cz2010_hourly_temp_data_to_cache(self, ts):
""" Write hourly CZ2010 temperature data to cache for given year. """
return write_cz2010_hourly_temp_data_to_cache(self.usaf_id, ts)
# delete cached data for a particular year
def destroy_cached_isd_hourly_temp_data(self, year):
""" Remove cached resampled hourly ISD temperature data to cache for given year. """
return destroy_cached_isd_hourly_temp_data(self.usaf_id, year)
def destroy_cached_isd_daily_temp_data(self, year):
""" Remove cached resampled daily ISD temperature data to cache for given year. """
return destroy_cached_isd_daily_temp_data(self.usaf_id, year)
def destroy_cached_gsod_daily_temp_data(self, year):
""" Remove cached resampled daily GSOD temperature data to cache for given year. """
return destroy_cached_gsod_daily_temp_data(self.usaf_id, year)
def destroy_cached_tmy3_hourly_temp_data(self):
""" Remove cached hourly TMY3 temperature data to cache. """
return destroy_cached_tmy3_hourly_temp_data(self.usaf_id)
def destroy_cached_cz2010_hourly_temp_data(self):
""" Remove cached hourly CZ2010 temperature data to cache. """
return destroy_cached_cz2010_hourly_temp_data(self.usaf_id)
# load data either from cache if valid or directly from source
def load_isd_hourly_temp_data_cached_proxy(self, year):
""" Load resampled hourly ISD temperature data from cache, or if it is expired or hadn't been cached, fetch from FTP for given year. """
return load_isd_hourly_temp_data_cached_proxy(self.usaf_id, year)
def load_isd_daily_temp_data_cached_proxy(self, year):
""" Load resampled daily ISD temperature data from cache, or if it is expired or hadn't been cached, fetch from FTP for given year. """
return load_isd_daily_temp_data_cached_proxy(self.usaf_id, year)
def load_gsod_daily_temp_data_cached_proxy(self, year):
""" Load resampled daily GSOD temperature data from cache, or if it is expired or hadn't been cached, fetch from FTP for given year. """
return load_gsod_daily_temp_data_cached_proxy(self.usaf_id, year)
def load_tmy3_hourly_temp_data_cached_proxy(self):
""" Load hourly TMY3 temperature data from cache, or if it is expired or hadn't been cached, fetch from NREL. """
return load_tmy3_hourly_temp_data_cached_proxy(self.usaf_id)
def load_cz2010_hourly_temp_data_cached_proxy(self):
""" Load hourly CZ2010 temperature data from cache, or if it is expired or hadn't been cached, fetch from URL. """
return load_cz2010_hourly_temp_data_cached_proxy(self.usaf_id)
# main interface: load data from start date to end date
def load_isd_hourly_temp_data(
self,
start,
end,
read_from_cache=True,
write_to_cache=True,
error_on_missing_years=True,
):
""" Load resampled hourly ISD temperature data from start date to end date (inclusive).
This is the primary convenience method for loading resampled hourly ISD temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
"""
return load_isd_hourly_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
error_on_missing_years=error_on_missing_years,
)
def load_isd_daily_temp_data(
self, start, end, read_from_cache=True, write_to_cache=True
):
""" Load resampled daily ISD temperature data from start date to end date (inclusive).
This is the primary convenience method for loading resampled daily ISD temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
"""
return load_isd_daily_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
)
def load_gsod_daily_temp_data(
self, start, end, read_from_cache=True, write_to_cache=True
):
""" Load resampled daily GSOD temperature data from start date to end date (inclusive).
This is the primary convenience method for loading resampled daily GSOD temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
"""
return load_gsod_daily_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
)
def load_tmy3_hourly_temp_data(
self, start, end, read_from_cache=True, write_to_cache=True
):
""" Load hourly TMY3 temperature data from start date to end date (inclusive).
This is the primary convenience method for loading hourly TMY3 temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
"""
return load_tmy3_hourly_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
)
def load_cz2010_hourly_temp_data(
self, start, end, read_from_cache=True, write_to_cache=True
):
""" Load hourly CZ2010 temperature data from start date to end date (inclusive).
This is the primary convenience method for loading hourly CZ2010 temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
"""
return load_cz2010_hourly_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
)
# load all cached data for this station
def load_cached_isd_hourly_temp_data(self):
""" Load all cached resampled hourly ISD temperature data. """
return load_cached_isd_hourly_temp_data(self.usaf_id)
def load_cached_isd_daily_temp_data(self):
""" Load all cached resampled daily ISD temperature data. """
return load_cached_isd_daily_temp_data(self.usaf_id)
def load_cached_gsod_daily_temp_data(self):
""" Load all cached resampled daily GSOD temperature data. """
return load_cached_gsod_daily_temp_data(self.usaf_id)
def load_cached_tmy3_hourly_temp_data(self):
""" Load all cached hourly TMY3 temperature data (the year is set to 1900) """
return load_cached_tmy3_hourly_temp_data(self.usaf_id)
def load_cached_cz2010_hourly_temp_data(self):
""" Load all cached hourly TMY3 temperature data (the year is set to 1900) """
return load_cached_cz2010_hourly_temp_data(self.usaf_id)
|
openeemeter/eeweather | eeweather/stations.py | ISDStation.get_isd_filenames | python | def get_isd_filenames(self, year=None, with_host=False):
return get_isd_filenames(self.usaf_id, year, with_host=with_host) | Get filenames of raw ISD station data. | train | https://github.com/openeemeter/eeweather/blob/d32b7369b26edfa3ee431c60457afeb0593123a7/eeweather/stations.py#L1170-L1172 | [
"def get_isd_filenames(usaf_id, target_year=None, filename_format=None, with_host=False):\n valid_usaf_id_or_raise(usaf_id)\n if filename_format is None:\n filename_format = \"/pub/data/noaa/{year}/{usaf_id}-{wban_id}-{year}.gz\"\n conn = metadata_db_connection_proxy.get_connection()\n cur = conn... | class ISDStation(object):
""" A representation of an Integrated Surface Database weather station.
Contains data about a particular ISD station, as well as methods to pull
data for this station.
Parameters
----------
usaf_id : str
ISD station USAF ID
load_metatdata : bool, optional
Whether or not to auto-load metadata for this station
Attributes
----------
usaf_id : str
ISD station USAF ID
iecc_climate_zone : str
IECC Climate Zone
iecc_moisture_regime : str
IECC Moisture Regime
ba_climate_zone : str
Building America Climate Zone
ca_climate_zone : str
California Building Climate Zone
elevation : float
elevation of station
latitude : float
latitude of station
longitude : float
longitude of station
coords : tuple of (float, float)
lat/long coordinates of station
name : str
name of the station
quality : str
"high", "medium", "low"
wban_ids : list of str
list of WBAN IDs, or "99999" which have been used to identify the station.
recent_wban_id = None
WBAN ID most recently used to identify the station.
climate_zones = {}
dict of all climate zones.
"""
def __init__(self, usaf_id, load_metadata=True):
self.usaf_id = usaf_id
if load_metadata:
self._load_metadata()
else:
valid_usaf_id_or_raise(usaf_id)
self.iecc_climate_zone = None
self.iecc_moisture_regime = None
self.ba_climate_zone = None
self.ca_climate_zone = None
self.elevation = None
self.latitude = None
self.longitude = None
self.coords = None
self.name = None
self.quality = None
self.wban_ids = None
self.recent_wban_id = None
self.climate_zones = {}
def __str__(self):
return self.usaf_id
def __repr__(self):
return "ISDStation('{}')".format(self.usaf_id)
def _load_metadata(self):
metadata = get_isd_station_metadata(self.usaf_id)
def _float_or_none(field):
value = metadata.get(field)
return None if value is None else float(value)
self.iecc_climate_zone = metadata.get("iecc_climate_zone")
self.iecc_moisture_regime = metadata.get("iecc_moisture_regime")
self.ba_climate_zone = metadata.get("ba_climate_zone")
self.ca_climate_zone = metadata.get("ca_climate_zone")
self.icao_code = metadata.get("icao_code")
self.elevation = _float_or_none("elevation") # meters
self.latitude = _float_or_none("latitude")
self.longitude = _float_or_none("longitude")
self.coords = (self.latitude, self.longitude)
self.name = metadata.get("name")
self.quality = metadata.get("quality")
self.wban_ids = metadata.get("wban_ids", "").split(",")
self.recent_wban_id = metadata.get("recent_wban_id")
self.climate_zones = {
"iecc_climate_zone": metadata.get("iecc_climate_zone"),
"iecc_moisture_regime": metadata.get("iecc_moisture_regime"),
"ba_climate_zone": metadata.get("ba_climate_zone"),
"ca_climate_zone": metadata.get("ca_climate_zone"),
}
def json(self):
""" Return a JSON-serializeable object containing station metadata."""
return {
"elevation": self.elevation,
"latitude": self.latitude,
"longitude": self.longitude,
"icao_code": self.icao_code,
"name": self.name,
"quality": self.quality,
"wban_ids": self.wban_ids,
"recent_wban_id": self.recent_wban_id,
"climate_zones": {
"iecc_climate_zone": self.iecc_climate_zone,
"iecc_moisture_regime": self.iecc_moisture_regime,
"ba_climate_zone": self.ba_climate_zone,
"ca_climate_zone": self.ca_climate_zone,
},
}
def get_gsod_filenames(self, year=None, with_host=False):
""" Get filenames of raw GSOD station data. """
return get_gsod_filenames(self.usaf_id, year, with_host=with_host)
def get_isd_file_metadata(self):
""" Get raw file metadata for the station. """
return get_isd_file_metadata(self.usaf_id)
# fetch raw data
def fetch_isd_raw_temp_data(self, year):
""" Pull raw ISD data for the given year directly from FTP. """
return fetch_isd_raw_temp_data(self.usaf_id, year)
def fetch_gsod_raw_temp_data(self, year):
""" Pull raw GSOD data for the given year directly from FTP. """
return fetch_gsod_raw_temp_data(self.usaf_id, year)
# fetch raw data then frequency-normalize
def fetch_isd_hourly_temp_data(self, year):
""" Pull raw ISD temperature data for the given year directly from FTP and resample to hourly time series. """
return fetch_isd_hourly_temp_data(self.usaf_id, year)
def fetch_isd_daily_temp_data(self, year):
""" Pull raw ISD temperature data for the given year directly from FTP and resample to daily time series. """
return fetch_isd_daily_temp_data(self.usaf_id, year)
def fetch_gsod_daily_temp_data(self, year):
""" Pull raw GSOD temperature data for the given year directly from FTP and resample to daily time series. """
return fetch_gsod_daily_temp_data(self.usaf_id, year)
def fetch_tmy3_hourly_temp_data(self):
""" Pull hourly TMY3 temperature hourly time series directly from NREL. """
return fetch_tmy3_hourly_temp_data(self.usaf_id)
def fetch_cz2010_hourly_temp_data(self):
""" Pull hourly CZ2010 temperature hourly time series from URL. """
return fetch_cz2010_hourly_temp_data(self.usaf_id)
# get key-value store key
def get_isd_hourly_temp_data_cache_key(self, year):
""" Get key used to cache resampled hourly ISD temperature data for the given year. """
return get_isd_hourly_temp_data_cache_key(self.usaf_id, year)
def get_isd_daily_temp_data_cache_key(self, year):
""" Get key used to cache resampled daily ISD temperature data for the given year. """
return get_isd_daily_temp_data_cache_key(self.usaf_id, year)
def get_gsod_daily_temp_data_cache_key(self, year):
""" Get key used to cache resampled daily GSOD temperature data for the given year. """
return get_gsod_daily_temp_data_cache_key(self.usaf_id, year)
def get_tmy3_hourly_temp_data_cache_key(self):
""" Get key used to cache TMY3 weather-normalized temperature data. """
return get_tmy3_hourly_temp_data_cache_key(self.usaf_id)
def get_cz2010_hourly_temp_data_cache_key(self):
""" Get key used to cache CZ2010 weather-normalized temperature data. """
return get_cz2010_hourly_temp_data_cache_key(self.usaf_id)
# is cached data expired? boolean. true if expired or not in cache
def cached_isd_hourly_temp_data_is_expired(self, year):
""" Return True if cache of resampled hourly ISD temperature data has expired or does not exist for the given year. """
return cached_isd_hourly_temp_data_is_expired(self.usaf_id, year)
def cached_isd_daily_temp_data_is_expired(self, year):
""" Return True if cache of resampled daily ISD temperature data has expired or does not exist for the given year. """
return cached_isd_daily_temp_data_is_expired(self.usaf_id, year)
def cached_gsod_daily_temp_data_is_expired(self, year):
""" Return True if cache of resampled daily GSOD temperature data has expired or does not exist for the given year. """
return cached_gsod_daily_temp_data_is_expired(self.usaf_id, year)
# check if data is available and delete data in the cache if it's expired
def validate_isd_hourly_temp_data_cache(self, year):
""" Delete cached resampled hourly ISD temperature data if it has expired for the given year. """
return validate_isd_hourly_temp_data_cache(self.usaf_id, year)
def validate_isd_daily_temp_data_cache(self, year):
""" Delete cached resampled daily ISD temperature data if it has expired for the given year. """
return validate_isd_daily_temp_data_cache(self.usaf_id, year)
def validate_gsod_daily_temp_data_cache(self, year):
""" Delete cached resampled daily GSOD temperature data if it has expired for the given year. """
return validate_gsod_daily_temp_data_cache(self.usaf_id, year)
def validate_tmy3_hourly_temp_data_cache(self):
""" Check if TMY3 data exists in cache. """
return validate_tmy3_hourly_temp_data_cache(self.usaf_id)
def validate_cz2010_hourly_temp_data_cache(self):
""" Check if CZ2010 data exists in cache. """
return validate_cz2010_hourly_temp_data_cache(self.usaf_id)
# pandas time series to json
def serialize_isd_hourly_temp_data(self, ts):
""" Serialize resampled hourly ISD pandas time series as JSON for caching. """
return serialize_isd_hourly_temp_data(ts)
def serialize_isd_daily_temp_data(self, ts):
""" Serialize resampled daily ISD pandas time series as JSON for caching. """
return serialize_isd_daily_temp_data(ts)
def serialize_gsod_daily_temp_data(self, ts):
""" Serialize resampled daily GSOD pandas time series as JSON for caching. """
return serialize_gsod_daily_temp_data(ts)
def serialize_tmy3_hourly_temp_data(self, ts):
""" Serialize hourly TMY3 pandas time series as JSON for caching. """
return serialize_tmy3_hourly_temp_data(ts)
def serialize_cz2010_hourly_temp_data(self, ts):
""" Serialize hourly CZ2010 pandas time series as JSON for caching. """
return serialize_cz2010_hourly_temp_data(ts)
# json to pandas time series
def deserialize_isd_hourly_temp_data(self, data):
""" Deserialize JSON representation of resampled hourly ISD into pandas time series. """
return deserialize_isd_hourly_temp_data(data)
def deserialize_isd_daily_temp_data(self, data):
""" Deserialize JSON representation of resampled daily ISD into pandas time series. """
return deserialize_isd_daily_temp_data(data)
def deserialize_gsod_daily_temp_data(self, data):
""" Deserialize JSON representation of resampled daily GSOD into pandas time series. """
return deserialize_gsod_daily_temp_data(data)
def deserialize_tmy3_hourly_temp_data(self, data):
""" Deserialize JSON representation of hourly TMY3 into pandas time series. """
return deserialize_isd_hourly_temp_data(data)
def deserialize_cz2010_hourly_temp_data(self, data):
""" Deserialize JSON representation of hourly CZ2010 into pandas time series. """
return deserialize_cz2010_hourly_temp_data(data)
# return pandas time series of data from cache
def read_isd_hourly_temp_data_from_cache(self, year):
""" Get cached version of resampled hourly ISD temperature data for given year. """
return read_isd_hourly_temp_data_from_cache(self.usaf_id, year)
def read_isd_daily_temp_data_from_cache(self, year):
""" Get cached version of resampled daily ISD temperature data for given year. """
return read_isd_daily_temp_data_from_cache(self.usaf_id, year)
def read_gsod_daily_temp_data_from_cache(self, year):
""" Get cached version of resampled daily GSOD temperature data for given year. """
return read_gsod_daily_temp_data_from_cache(self.usaf_id, year)
def read_tmy3_hourly_temp_data_from_cache(self):
""" Get cached version of hourly TMY3 temperature data. """
return read_tmy3_hourly_temp_data_from_cache(self.usaf_id)
def read_cz2010_hourly_temp_data_from_cache(self):
""" Get cached version of hourly TMY3 temperature data. """
return read_cz2010_hourly_temp_data_from_cache(self.usaf_id)
# write pandas time series of data to cache for a particular year
def write_isd_hourly_temp_data_to_cache(self, year, ts):
""" Write resampled hourly ISD temperature data to cache for given year. """
return write_isd_hourly_temp_data_to_cache(self.usaf_id, year, ts)
def write_isd_daily_temp_data_to_cache(self, year, ts):
""" Write resampled daily ISD temperature data to cache for given year. """
return write_isd_daily_temp_data_to_cache(self.usaf_id, year, ts)
def write_gsod_daily_temp_data_to_cache(self, year, ts):
""" Write resampled daily GSOD temperature data to cache for given year. """
return write_gsod_daily_temp_data_to_cache(self.usaf_id, year, ts)
def write_tmy3_hourly_temp_data_to_cache(self, ts):
""" Write hourly TMY3 temperature data to cache for given year. """
return write_tmy3_hourly_temp_data_to_cache(self.usaf_id, ts)
def write_cz2010_hourly_temp_data_to_cache(self, ts):
""" Write hourly CZ2010 temperature data to cache for given year. """
return write_cz2010_hourly_temp_data_to_cache(self.usaf_id, ts)
# delete cached data for a particular year
def destroy_cached_isd_hourly_temp_data(self, year):
""" Remove cached resampled hourly ISD temperature data to cache for given year. """
return destroy_cached_isd_hourly_temp_data(self.usaf_id, year)
def destroy_cached_isd_daily_temp_data(self, year):
""" Remove cached resampled daily ISD temperature data to cache for given year. """
return destroy_cached_isd_daily_temp_data(self.usaf_id, year)
def destroy_cached_gsod_daily_temp_data(self, year):
""" Remove cached resampled daily GSOD temperature data to cache for given year. """
return destroy_cached_gsod_daily_temp_data(self.usaf_id, year)
def destroy_cached_tmy3_hourly_temp_data(self):
""" Remove cached hourly TMY3 temperature data to cache. """
return destroy_cached_tmy3_hourly_temp_data(self.usaf_id)
def destroy_cached_cz2010_hourly_temp_data(self):
""" Remove cached hourly CZ2010 temperature data to cache. """
return destroy_cached_cz2010_hourly_temp_data(self.usaf_id)
# load data either from cache if valid or directly from source
def load_isd_hourly_temp_data_cached_proxy(self, year):
""" Load resampled hourly ISD temperature data from cache, or if it is expired or hadn't been cached, fetch from FTP for given year. """
return load_isd_hourly_temp_data_cached_proxy(self.usaf_id, year)
def load_isd_daily_temp_data_cached_proxy(self, year):
""" Load resampled daily ISD temperature data from cache, or if it is expired or hadn't been cached, fetch from FTP for given year. """
return load_isd_daily_temp_data_cached_proxy(self.usaf_id, year)
def load_gsod_daily_temp_data_cached_proxy(self, year):
""" Load resampled daily GSOD temperature data from cache, or if it is expired or hadn't been cached, fetch from FTP for given year. """
return load_gsod_daily_temp_data_cached_proxy(self.usaf_id, year)
def load_tmy3_hourly_temp_data_cached_proxy(self):
""" Load hourly TMY3 temperature data from cache, or if it is expired or hadn't been cached, fetch from NREL. """
return load_tmy3_hourly_temp_data_cached_proxy(self.usaf_id)
def load_cz2010_hourly_temp_data_cached_proxy(self):
""" Load hourly CZ2010 temperature data from cache, or if it is expired or hadn't been cached, fetch from URL. """
return load_cz2010_hourly_temp_data_cached_proxy(self.usaf_id)
# main interface: load data from start date to end date
def load_isd_hourly_temp_data(
self,
start,
end,
read_from_cache=True,
write_to_cache=True,
error_on_missing_years=True,
):
""" Load resampled hourly ISD temperature data from start date to end date (inclusive).
This is the primary convenience method for loading resampled hourly ISD temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
"""
return load_isd_hourly_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
error_on_missing_years=error_on_missing_years,
)
def load_isd_daily_temp_data(
self, start, end, read_from_cache=True, write_to_cache=True
):
""" Load resampled daily ISD temperature data from start date to end date (inclusive).
This is the primary convenience method for loading resampled daily ISD temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
"""
return load_isd_daily_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
)
def load_gsod_daily_temp_data(
self, start, end, read_from_cache=True, write_to_cache=True
):
""" Load resampled daily GSOD temperature data from start date to end date (inclusive).
This is the primary convenience method for loading resampled daily GSOD temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
"""
return load_gsod_daily_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
)
def load_tmy3_hourly_temp_data(
self, start, end, read_from_cache=True, write_to_cache=True
):
""" Load hourly TMY3 temperature data from start date to end date (inclusive).
This is the primary convenience method for loading hourly TMY3 temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
"""
return load_tmy3_hourly_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
)
def load_cz2010_hourly_temp_data(
self, start, end, read_from_cache=True, write_to_cache=True
):
""" Load hourly CZ2010 temperature data from start date to end date (inclusive).
This is the primary convenience method for loading hourly CZ2010 temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
"""
return load_cz2010_hourly_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
)
# load all cached data for this station
def load_cached_isd_hourly_temp_data(self):
""" Load all cached resampled hourly ISD temperature data. """
return load_cached_isd_hourly_temp_data(self.usaf_id)
def load_cached_isd_daily_temp_data(self):
""" Load all cached resampled daily ISD temperature data. """
return load_cached_isd_daily_temp_data(self.usaf_id)
def load_cached_gsod_daily_temp_data(self):
""" Load all cached resampled daily GSOD temperature data. """
return load_cached_gsod_daily_temp_data(self.usaf_id)
def load_cached_tmy3_hourly_temp_data(self):
""" Load all cached hourly TMY3 temperature data (the year is set to 1900) """
return load_cached_tmy3_hourly_temp_data(self.usaf_id)
def load_cached_cz2010_hourly_temp_data(self):
""" Load all cached hourly TMY3 temperature data (the year is set to 1900) """
return load_cached_cz2010_hourly_temp_data(self.usaf_id)
|
openeemeter/eeweather | eeweather/stations.py | ISDStation.get_gsod_filenames | python | def get_gsod_filenames(self, year=None, with_host=False):
return get_gsod_filenames(self.usaf_id, year, with_host=with_host) | Get filenames of raw GSOD station data. | train | https://github.com/openeemeter/eeweather/blob/d32b7369b26edfa3ee431c60457afeb0593123a7/eeweather/stations.py#L1174-L1176 | [
"def get_gsod_filenames(usaf_id, year=None, with_host=False):\n filename_format = \"/pub/data/gsod/{year}/{usaf_id}-{wban_id}-{year}.op.gz\"\n return get_isd_filenames(\n usaf_id, year, filename_format=filename_format, with_host=with_host\n )\n"
] | class ISDStation(object):
""" A representation of an Integrated Surface Database weather station.
Contains data about a particular ISD station, as well as methods to pull
data for this station.
Parameters
----------
usaf_id : str
ISD station USAF ID
load_metatdata : bool, optional
Whether or not to auto-load metadata for this station
Attributes
----------
usaf_id : str
ISD station USAF ID
iecc_climate_zone : str
IECC Climate Zone
iecc_moisture_regime : str
IECC Moisture Regime
ba_climate_zone : str
Building America Climate Zone
ca_climate_zone : str
California Building Climate Zone
elevation : float
elevation of station
latitude : float
latitude of station
longitude : float
longitude of station
coords : tuple of (float, float)
lat/long coordinates of station
name : str
name of the station
quality : str
"high", "medium", "low"
wban_ids : list of str
list of WBAN IDs, or "99999" which have been used to identify the station.
recent_wban_id = None
WBAN ID most recently used to identify the station.
climate_zones = {}
dict of all climate zones.
"""
def __init__(self, usaf_id, load_metadata=True):
self.usaf_id = usaf_id
if load_metadata:
self._load_metadata()
else:
valid_usaf_id_or_raise(usaf_id)
self.iecc_climate_zone = None
self.iecc_moisture_regime = None
self.ba_climate_zone = None
self.ca_climate_zone = None
self.elevation = None
self.latitude = None
self.longitude = None
self.coords = None
self.name = None
self.quality = None
self.wban_ids = None
self.recent_wban_id = None
self.climate_zones = {}
def __str__(self):
return self.usaf_id
def __repr__(self):
return "ISDStation('{}')".format(self.usaf_id)
def _load_metadata(self):
metadata = get_isd_station_metadata(self.usaf_id)
def _float_or_none(field):
value = metadata.get(field)
return None if value is None else float(value)
self.iecc_climate_zone = metadata.get("iecc_climate_zone")
self.iecc_moisture_regime = metadata.get("iecc_moisture_regime")
self.ba_climate_zone = metadata.get("ba_climate_zone")
self.ca_climate_zone = metadata.get("ca_climate_zone")
self.icao_code = metadata.get("icao_code")
self.elevation = _float_or_none("elevation") # meters
self.latitude = _float_or_none("latitude")
self.longitude = _float_or_none("longitude")
self.coords = (self.latitude, self.longitude)
self.name = metadata.get("name")
self.quality = metadata.get("quality")
self.wban_ids = metadata.get("wban_ids", "").split(",")
self.recent_wban_id = metadata.get("recent_wban_id")
self.climate_zones = {
"iecc_climate_zone": metadata.get("iecc_climate_zone"),
"iecc_moisture_regime": metadata.get("iecc_moisture_regime"),
"ba_climate_zone": metadata.get("ba_climate_zone"),
"ca_climate_zone": metadata.get("ca_climate_zone"),
}
def json(self):
""" Return a JSON-serializeable object containing station metadata."""
return {
"elevation": self.elevation,
"latitude": self.latitude,
"longitude": self.longitude,
"icao_code": self.icao_code,
"name": self.name,
"quality": self.quality,
"wban_ids": self.wban_ids,
"recent_wban_id": self.recent_wban_id,
"climate_zones": {
"iecc_climate_zone": self.iecc_climate_zone,
"iecc_moisture_regime": self.iecc_moisture_regime,
"ba_climate_zone": self.ba_climate_zone,
"ca_climate_zone": self.ca_climate_zone,
},
}
def get_isd_filenames(self, year=None, with_host=False):
""" Get filenames of raw ISD station data. """
return get_isd_filenames(self.usaf_id, year, with_host=with_host)
def get_isd_file_metadata(self):
""" Get raw file metadata for the station. """
return get_isd_file_metadata(self.usaf_id)
# fetch raw data
def fetch_isd_raw_temp_data(self, year):
""" Pull raw ISD data for the given year directly from FTP. """
return fetch_isd_raw_temp_data(self.usaf_id, year)
def fetch_gsod_raw_temp_data(self, year):
""" Pull raw GSOD data for the given year directly from FTP. """
return fetch_gsod_raw_temp_data(self.usaf_id, year)
# fetch raw data then frequency-normalize
def fetch_isd_hourly_temp_data(self, year):
""" Pull raw ISD temperature data for the given year directly from FTP and resample to hourly time series. """
return fetch_isd_hourly_temp_data(self.usaf_id, year)
def fetch_isd_daily_temp_data(self, year):
""" Pull raw ISD temperature data for the given year directly from FTP and resample to daily time series. """
return fetch_isd_daily_temp_data(self.usaf_id, year)
def fetch_gsod_daily_temp_data(self, year):
""" Pull raw GSOD temperature data for the given year directly from FTP and resample to daily time series. """
return fetch_gsod_daily_temp_data(self.usaf_id, year)
def fetch_tmy3_hourly_temp_data(self):
""" Pull hourly TMY3 temperature hourly time series directly from NREL. """
return fetch_tmy3_hourly_temp_data(self.usaf_id)
def fetch_cz2010_hourly_temp_data(self):
""" Pull hourly CZ2010 temperature hourly time series from URL. """
return fetch_cz2010_hourly_temp_data(self.usaf_id)
# get key-value store key
def get_isd_hourly_temp_data_cache_key(self, year):
""" Get key used to cache resampled hourly ISD temperature data for the given year. """
return get_isd_hourly_temp_data_cache_key(self.usaf_id, year)
def get_isd_daily_temp_data_cache_key(self, year):
""" Get key used to cache resampled daily ISD temperature data for the given year. """
return get_isd_daily_temp_data_cache_key(self.usaf_id, year)
def get_gsod_daily_temp_data_cache_key(self, year):
""" Get key used to cache resampled daily GSOD temperature data for the given year. """
return get_gsod_daily_temp_data_cache_key(self.usaf_id, year)
def get_tmy3_hourly_temp_data_cache_key(self):
""" Get key used to cache TMY3 weather-normalized temperature data. """
return get_tmy3_hourly_temp_data_cache_key(self.usaf_id)
def get_cz2010_hourly_temp_data_cache_key(self):
""" Get key used to cache CZ2010 weather-normalized temperature data. """
return get_cz2010_hourly_temp_data_cache_key(self.usaf_id)
# is cached data expired? boolean. true if expired or not in cache
def cached_isd_hourly_temp_data_is_expired(self, year):
""" Return True if cache of resampled hourly ISD temperature data has expired or does not exist for the given year. """
return cached_isd_hourly_temp_data_is_expired(self.usaf_id, year)
def cached_isd_daily_temp_data_is_expired(self, year):
""" Return True if cache of resampled daily ISD temperature data has expired or does not exist for the given year. """
return cached_isd_daily_temp_data_is_expired(self.usaf_id, year)
def cached_gsod_daily_temp_data_is_expired(self, year):
""" Return True if cache of resampled daily GSOD temperature data has expired or does not exist for the given year. """
return cached_gsod_daily_temp_data_is_expired(self.usaf_id, year)
# check if data is available and delete data in the cache if it's expired
def validate_isd_hourly_temp_data_cache(self, year):
""" Delete cached resampled hourly ISD temperature data if it has expired for the given year. """
return validate_isd_hourly_temp_data_cache(self.usaf_id, year)
def validate_isd_daily_temp_data_cache(self, year):
""" Delete cached resampled daily ISD temperature data if it has expired for the given year. """
return validate_isd_daily_temp_data_cache(self.usaf_id, year)
def validate_gsod_daily_temp_data_cache(self, year):
""" Delete cached resampled daily GSOD temperature data if it has expired for the given year. """
return validate_gsod_daily_temp_data_cache(self.usaf_id, year)
def validate_tmy3_hourly_temp_data_cache(self):
""" Check if TMY3 data exists in cache. """
return validate_tmy3_hourly_temp_data_cache(self.usaf_id)
def validate_cz2010_hourly_temp_data_cache(self):
""" Check if CZ2010 data exists in cache. """
return validate_cz2010_hourly_temp_data_cache(self.usaf_id)
# pandas time series to json
def serialize_isd_hourly_temp_data(self, ts):
""" Serialize resampled hourly ISD pandas time series as JSON for caching. """
return serialize_isd_hourly_temp_data(ts)
def serialize_isd_daily_temp_data(self, ts):
""" Serialize resampled daily ISD pandas time series as JSON for caching. """
return serialize_isd_daily_temp_data(ts)
def serialize_gsod_daily_temp_data(self, ts):
""" Serialize resampled daily GSOD pandas time series as JSON for caching. """
return serialize_gsod_daily_temp_data(ts)
def serialize_tmy3_hourly_temp_data(self, ts):
""" Serialize hourly TMY3 pandas time series as JSON for caching. """
return serialize_tmy3_hourly_temp_data(ts)
def serialize_cz2010_hourly_temp_data(self, ts):
""" Serialize hourly CZ2010 pandas time series as JSON for caching. """
return serialize_cz2010_hourly_temp_data(ts)
# json to pandas time series
def deserialize_isd_hourly_temp_data(self, data):
""" Deserialize JSON representation of resampled hourly ISD into pandas time series. """
return deserialize_isd_hourly_temp_data(data)
def deserialize_isd_daily_temp_data(self, data):
""" Deserialize JSON representation of resampled daily ISD into pandas time series. """
return deserialize_isd_daily_temp_data(data)
def deserialize_gsod_daily_temp_data(self, data):
""" Deserialize JSON representation of resampled daily GSOD into pandas time series. """
return deserialize_gsod_daily_temp_data(data)
def deserialize_tmy3_hourly_temp_data(self, data):
""" Deserialize JSON representation of hourly TMY3 into pandas time series. """
return deserialize_isd_hourly_temp_data(data)
def deserialize_cz2010_hourly_temp_data(self, data):
""" Deserialize JSON representation of hourly CZ2010 into pandas time series. """
return deserialize_cz2010_hourly_temp_data(data)
# return pandas time series of data from cache
def read_isd_hourly_temp_data_from_cache(self, year):
""" Get cached version of resampled hourly ISD temperature data for given year. """
return read_isd_hourly_temp_data_from_cache(self.usaf_id, year)
def read_isd_daily_temp_data_from_cache(self, year):
""" Get cached version of resampled daily ISD temperature data for given year. """
return read_isd_daily_temp_data_from_cache(self.usaf_id, year)
def read_gsod_daily_temp_data_from_cache(self, year):
""" Get cached version of resampled daily GSOD temperature data for given year. """
return read_gsod_daily_temp_data_from_cache(self.usaf_id, year)
def read_tmy3_hourly_temp_data_from_cache(self):
""" Get cached version of hourly TMY3 temperature data. """
return read_tmy3_hourly_temp_data_from_cache(self.usaf_id)
def read_cz2010_hourly_temp_data_from_cache(self):
""" Get cached version of hourly TMY3 temperature data. """
return read_cz2010_hourly_temp_data_from_cache(self.usaf_id)
# write pandas time series of data to cache for a particular year
def write_isd_hourly_temp_data_to_cache(self, year, ts):
""" Write resampled hourly ISD temperature data to cache for given year. """
return write_isd_hourly_temp_data_to_cache(self.usaf_id, year, ts)
def write_isd_daily_temp_data_to_cache(self, year, ts):
""" Write resampled daily ISD temperature data to cache for given year. """
return write_isd_daily_temp_data_to_cache(self.usaf_id, year, ts)
def write_gsod_daily_temp_data_to_cache(self, year, ts):
""" Write resampled daily GSOD temperature data to cache for given year. """
return write_gsod_daily_temp_data_to_cache(self.usaf_id, year, ts)
def write_tmy3_hourly_temp_data_to_cache(self, ts):
""" Write hourly TMY3 temperature data to cache for given year. """
return write_tmy3_hourly_temp_data_to_cache(self.usaf_id, ts)
def write_cz2010_hourly_temp_data_to_cache(self, ts):
""" Write hourly CZ2010 temperature data to cache for given year. """
return write_cz2010_hourly_temp_data_to_cache(self.usaf_id, ts)
# delete cached data for a particular year
def destroy_cached_isd_hourly_temp_data(self, year):
""" Remove cached resampled hourly ISD temperature data to cache for given year. """
return destroy_cached_isd_hourly_temp_data(self.usaf_id, year)
def destroy_cached_isd_daily_temp_data(self, year):
""" Remove cached resampled daily ISD temperature data to cache for given year. """
return destroy_cached_isd_daily_temp_data(self.usaf_id, year)
def destroy_cached_gsod_daily_temp_data(self, year):
""" Remove cached resampled daily GSOD temperature data to cache for given year. """
return destroy_cached_gsod_daily_temp_data(self.usaf_id, year)
def destroy_cached_tmy3_hourly_temp_data(self):
""" Remove cached hourly TMY3 temperature data to cache. """
return destroy_cached_tmy3_hourly_temp_data(self.usaf_id)
def destroy_cached_cz2010_hourly_temp_data(self):
""" Remove cached hourly CZ2010 temperature data to cache. """
return destroy_cached_cz2010_hourly_temp_data(self.usaf_id)
# load data either from cache if valid or directly from source
def load_isd_hourly_temp_data_cached_proxy(self, year):
""" Load resampled hourly ISD temperature data from cache, or if it is expired or hadn't been cached, fetch from FTP for given year. """
return load_isd_hourly_temp_data_cached_proxy(self.usaf_id, year)
def load_isd_daily_temp_data_cached_proxy(self, year):
""" Load resampled daily ISD temperature data from cache, or if it is expired or hadn't been cached, fetch from FTP for given year. """
return load_isd_daily_temp_data_cached_proxy(self.usaf_id, year)
def load_gsod_daily_temp_data_cached_proxy(self, year):
""" Load resampled daily GSOD temperature data from cache, or if it is expired or hadn't been cached, fetch from FTP for given year. """
return load_gsod_daily_temp_data_cached_proxy(self.usaf_id, year)
def load_tmy3_hourly_temp_data_cached_proxy(self):
""" Load hourly TMY3 temperature data from cache, or if it is expired or hadn't been cached, fetch from NREL. """
return load_tmy3_hourly_temp_data_cached_proxy(self.usaf_id)
def load_cz2010_hourly_temp_data_cached_proxy(self):
""" Load hourly CZ2010 temperature data from cache, or if it is expired or hadn't been cached, fetch from URL. """
return load_cz2010_hourly_temp_data_cached_proxy(self.usaf_id)
# main interface: load data from start date to end date
def load_isd_hourly_temp_data(
self,
start,
end,
read_from_cache=True,
write_to_cache=True,
error_on_missing_years=True,
):
""" Load resampled hourly ISD temperature data from start date to end date (inclusive).
This is the primary convenience method for loading resampled hourly ISD temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
"""
return load_isd_hourly_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
error_on_missing_years=error_on_missing_years,
)
def load_isd_daily_temp_data(
self, start, end, read_from_cache=True, write_to_cache=True
):
""" Load resampled daily ISD temperature data from start date to end date (inclusive).
This is the primary convenience method for loading resampled daily ISD temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
"""
return load_isd_daily_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
)
def load_gsod_daily_temp_data(
self, start, end, read_from_cache=True, write_to_cache=True
):
""" Load resampled daily GSOD temperature data from start date to end date (inclusive).
This is the primary convenience method for loading resampled daily GSOD temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
"""
return load_gsod_daily_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
)
def load_tmy3_hourly_temp_data(
self, start, end, read_from_cache=True, write_to_cache=True
):
""" Load hourly TMY3 temperature data from start date to end date (inclusive).
This is the primary convenience method for loading hourly TMY3 temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
"""
return load_tmy3_hourly_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
)
def load_cz2010_hourly_temp_data(
self, start, end, read_from_cache=True, write_to_cache=True
):
""" Load hourly CZ2010 temperature data from start date to end date (inclusive).
This is the primary convenience method for loading hourly CZ2010 temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
"""
return load_cz2010_hourly_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
)
# load all cached data for this station
def load_cached_isd_hourly_temp_data(self):
""" Load all cached resampled hourly ISD temperature data. """
return load_cached_isd_hourly_temp_data(self.usaf_id)
def load_cached_isd_daily_temp_data(self):
""" Load all cached resampled daily ISD temperature data. """
return load_cached_isd_daily_temp_data(self.usaf_id)
def load_cached_gsod_daily_temp_data(self):
""" Load all cached resampled daily GSOD temperature data. """
return load_cached_gsod_daily_temp_data(self.usaf_id)
def load_cached_tmy3_hourly_temp_data(self):
""" Load all cached hourly TMY3 temperature data (the year is set to 1900) """
return load_cached_tmy3_hourly_temp_data(self.usaf_id)
def load_cached_cz2010_hourly_temp_data(self):
""" Load all cached hourly TMY3 temperature data (the year is set to 1900) """
return load_cached_cz2010_hourly_temp_data(self.usaf_id)
|
openeemeter/eeweather | eeweather/stations.py | ISDStation.load_isd_hourly_temp_data | python | def load_isd_hourly_temp_data(
self,
start,
end,
read_from_cache=True,
write_to_cache=True,
error_on_missing_years=True,
):
return load_isd_hourly_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
error_on_missing_years=error_on_missing_years,
) | Load resampled hourly ISD temperature data from start date to end date (inclusive).
This is the primary convenience method for loading resampled hourly ISD temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache. | train | https://github.com/openeemeter/eeweather/blob/d32b7369b26edfa3ee431c60457afeb0593123a7/eeweather/stations.py#L1394-L1424 | [
"def load_isd_hourly_temp_data(\n usaf_id,\n start,\n end,\n read_from_cache=True,\n write_to_cache=True,\n error_on_missing_years=False,\n):\n\n warnings = []\n # CalTRACK 2.3.3\n if start.tzinfo != pytz.UTC:\n raise NonUTCTimezoneInfoError(start)\n if end.tzinfo != pytz.UTC:\n... | class ISDStation(object):
""" A representation of an Integrated Surface Database weather station.
Contains data about a particular ISD station, as well as methods to pull
data for this station.
Parameters
----------
usaf_id : str
ISD station USAF ID
load_metatdata : bool, optional
Whether or not to auto-load metadata for this station
Attributes
----------
usaf_id : str
ISD station USAF ID
iecc_climate_zone : str
IECC Climate Zone
iecc_moisture_regime : str
IECC Moisture Regime
ba_climate_zone : str
Building America Climate Zone
ca_climate_zone : str
California Building Climate Zone
elevation : float
elevation of station
latitude : float
latitude of station
longitude : float
longitude of station
coords : tuple of (float, float)
lat/long coordinates of station
name : str
name of the station
quality : str
"high", "medium", "low"
wban_ids : list of str
list of WBAN IDs, or "99999" which have been used to identify the station.
recent_wban_id = None
WBAN ID most recently used to identify the station.
climate_zones = {}
dict of all climate zones.
"""
def __init__(self, usaf_id, load_metadata=True):
self.usaf_id = usaf_id
if load_metadata:
self._load_metadata()
else:
valid_usaf_id_or_raise(usaf_id)
self.iecc_climate_zone = None
self.iecc_moisture_regime = None
self.ba_climate_zone = None
self.ca_climate_zone = None
self.elevation = None
self.latitude = None
self.longitude = None
self.coords = None
self.name = None
self.quality = None
self.wban_ids = None
self.recent_wban_id = None
self.climate_zones = {}
def __str__(self):
return self.usaf_id
def __repr__(self):
return "ISDStation('{}')".format(self.usaf_id)
def _load_metadata(self):
metadata = get_isd_station_metadata(self.usaf_id)
def _float_or_none(field):
value = metadata.get(field)
return None if value is None else float(value)
self.iecc_climate_zone = metadata.get("iecc_climate_zone")
self.iecc_moisture_regime = metadata.get("iecc_moisture_regime")
self.ba_climate_zone = metadata.get("ba_climate_zone")
self.ca_climate_zone = metadata.get("ca_climate_zone")
self.icao_code = metadata.get("icao_code")
self.elevation = _float_or_none("elevation") # meters
self.latitude = _float_or_none("latitude")
self.longitude = _float_or_none("longitude")
self.coords = (self.latitude, self.longitude)
self.name = metadata.get("name")
self.quality = metadata.get("quality")
self.wban_ids = metadata.get("wban_ids", "").split(",")
self.recent_wban_id = metadata.get("recent_wban_id")
self.climate_zones = {
"iecc_climate_zone": metadata.get("iecc_climate_zone"),
"iecc_moisture_regime": metadata.get("iecc_moisture_regime"),
"ba_climate_zone": metadata.get("ba_climate_zone"),
"ca_climate_zone": metadata.get("ca_climate_zone"),
}
def json(self):
""" Return a JSON-serializeable object containing station metadata."""
return {
"elevation": self.elevation,
"latitude": self.latitude,
"longitude": self.longitude,
"icao_code": self.icao_code,
"name": self.name,
"quality": self.quality,
"wban_ids": self.wban_ids,
"recent_wban_id": self.recent_wban_id,
"climate_zones": {
"iecc_climate_zone": self.iecc_climate_zone,
"iecc_moisture_regime": self.iecc_moisture_regime,
"ba_climate_zone": self.ba_climate_zone,
"ca_climate_zone": self.ca_climate_zone,
},
}
def get_isd_filenames(self, year=None, with_host=False):
""" Get filenames of raw ISD station data. """
return get_isd_filenames(self.usaf_id, year, with_host=with_host)
def get_gsod_filenames(self, year=None, with_host=False):
""" Get filenames of raw GSOD station data. """
return get_gsod_filenames(self.usaf_id, year, with_host=with_host)
def get_isd_file_metadata(self):
""" Get raw file metadata for the station. """
return get_isd_file_metadata(self.usaf_id)
# fetch raw data
def fetch_isd_raw_temp_data(self, year):
""" Pull raw ISD data for the given year directly from FTP. """
return fetch_isd_raw_temp_data(self.usaf_id, year)
def fetch_gsod_raw_temp_data(self, year):
""" Pull raw GSOD data for the given year directly from FTP. """
return fetch_gsod_raw_temp_data(self.usaf_id, year)
# fetch raw data then frequency-normalize
def fetch_isd_hourly_temp_data(self, year):
""" Pull raw ISD temperature data for the given year directly from FTP and resample to hourly time series. """
return fetch_isd_hourly_temp_data(self.usaf_id, year)
def fetch_isd_daily_temp_data(self, year):
""" Pull raw ISD temperature data for the given year directly from FTP and resample to daily time series. """
return fetch_isd_daily_temp_data(self.usaf_id, year)
def fetch_gsod_daily_temp_data(self, year):
""" Pull raw GSOD temperature data for the given year directly from FTP and resample to daily time series. """
return fetch_gsod_daily_temp_data(self.usaf_id, year)
def fetch_tmy3_hourly_temp_data(self):
""" Pull hourly TMY3 temperature hourly time series directly from NREL. """
return fetch_tmy3_hourly_temp_data(self.usaf_id)
def fetch_cz2010_hourly_temp_data(self):
""" Pull hourly CZ2010 temperature hourly time series from URL. """
return fetch_cz2010_hourly_temp_data(self.usaf_id)
# get key-value store key
def get_isd_hourly_temp_data_cache_key(self, year):
""" Get key used to cache resampled hourly ISD temperature data for the given year. """
return get_isd_hourly_temp_data_cache_key(self.usaf_id, year)
def get_isd_daily_temp_data_cache_key(self, year):
""" Get key used to cache resampled daily ISD temperature data for the given year. """
return get_isd_daily_temp_data_cache_key(self.usaf_id, year)
def get_gsod_daily_temp_data_cache_key(self, year):
""" Get key used to cache resampled daily GSOD temperature data for the given year. """
return get_gsod_daily_temp_data_cache_key(self.usaf_id, year)
def get_tmy3_hourly_temp_data_cache_key(self):
""" Get key used to cache TMY3 weather-normalized temperature data. """
return get_tmy3_hourly_temp_data_cache_key(self.usaf_id)
def get_cz2010_hourly_temp_data_cache_key(self):
""" Get key used to cache CZ2010 weather-normalized temperature data. """
return get_cz2010_hourly_temp_data_cache_key(self.usaf_id)
# is cached data expired? boolean. true if expired or not in cache
def cached_isd_hourly_temp_data_is_expired(self, year):
""" Return True if cache of resampled hourly ISD temperature data has expired or does not exist for the given year. """
return cached_isd_hourly_temp_data_is_expired(self.usaf_id, year)
def cached_isd_daily_temp_data_is_expired(self, year):
""" Return True if cache of resampled daily ISD temperature data has expired or does not exist for the given year. """
return cached_isd_daily_temp_data_is_expired(self.usaf_id, year)
def cached_gsod_daily_temp_data_is_expired(self, year):
""" Return True if cache of resampled daily GSOD temperature data has expired or does not exist for the given year. """
return cached_gsod_daily_temp_data_is_expired(self.usaf_id, year)
# check if data is available and delete data in the cache if it's expired
def validate_isd_hourly_temp_data_cache(self, year):
""" Delete cached resampled hourly ISD temperature data if it has expired for the given year. """
return validate_isd_hourly_temp_data_cache(self.usaf_id, year)
def validate_isd_daily_temp_data_cache(self, year):
""" Delete cached resampled daily ISD temperature data if it has expired for the given year. """
return validate_isd_daily_temp_data_cache(self.usaf_id, year)
def validate_gsod_daily_temp_data_cache(self, year):
""" Delete cached resampled daily GSOD temperature data if it has expired for the given year. """
return validate_gsod_daily_temp_data_cache(self.usaf_id, year)
def validate_tmy3_hourly_temp_data_cache(self):
""" Check if TMY3 data exists in cache. """
return validate_tmy3_hourly_temp_data_cache(self.usaf_id)
def validate_cz2010_hourly_temp_data_cache(self):
""" Check if CZ2010 data exists in cache. """
return validate_cz2010_hourly_temp_data_cache(self.usaf_id)
# pandas time series to json
def serialize_isd_hourly_temp_data(self, ts):
""" Serialize resampled hourly ISD pandas time series as JSON for caching. """
return serialize_isd_hourly_temp_data(ts)
def serialize_isd_daily_temp_data(self, ts):
""" Serialize resampled daily ISD pandas time series as JSON for caching. """
return serialize_isd_daily_temp_data(ts)
def serialize_gsod_daily_temp_data(self, ts):
""" Serialize resampled daily GSOD pandas time series as JSON for caching. """
return serialize_gsod_daily_temp_data(ts)
def serialize_tmy3_hourly_temp_data(self, ts):
""" Serialize hourly TMY3 pandas time series as JSON for caching. """
return serialize_tmy3_hourly_temp_data(ts)
def serialize_cz2010_hourly_temp_data(self, ts):
""" Serialize hourly CZ2010 pandas time series as JSON for caching. """
return serialize_cz2010_hourly_temp_data(ts)
# json to pandas time series
def deserialize_isd_hourly_temp_data(self, data):
""" Deserialize JSON representation of resampled hourly ISD into pandas time series. """
return deserialize_isd_hourly_temp_data(data)
def deserialize_isd_daily_temp_data(self, data):
""" Deserialize JSON representation of resampled daily ISD into pandas time series. """
return deserialize_isd_daily_temp_data(data)
def deserialize_gsod_daily_temp_data(self, data):
""" Deserialize JSON representation of resampled daily GSOD into pandas time series. """
return deserialize_gsod_daily_temp_data(data)
def deserialize_tmy3_hourly_temp_data(self, data):
""" Deserialize JSON representation of hourly TMY3 into pandas time series. """
return deserialize_isd_hourly_temp_data(data)
def deserialize_cz2010_hourly_temp_data(self, data):
""" Deserialize JSON representation of hourly CZ2010 into pandas time series. """
return deserialize_cz2010_hourly_temp_data(data)
# return pandas time series of data from cache
def read_isd_hourly_temp_data_from_cache(self, year):
""" Get cached version of resampled hourly ISD temperature data for given year. """
return read_isd_hourly_temp_data_from_cache(self.usaf_id, year)
def read_isd_daily_temp_data_from_cache(self, year):
""" Get cached version of resampled daily ISD temperature data for given year. """
return read_isd_daily_temp_data_from_cache(self.usaf_id, year)
def read_gsod_daily_temp_data_from_cache(self, year):
""" Get cached version of resampled daily GSOD temperature data for given year. """
return read_gsod_daily_temp_data_from_cache(self.usaf_id, year)
def read_tmy3_hourly_temp_data_from_cache(self):
""" Get cached version of hourly TMY3 temperature data. """
return read_tmy3_hourly_temp_data_from_cache(self.usaf_id)
def read_cz2010_hourly_temp_data_from_cache(self):
""" Get cached version of hourly TMY3 temperature data. """
return read_cz2010_hourly_temp_data_from_cache(self.usaf_id)
# write pandas time series of data to cache for a particular year
def write_isd_hourly_temp_data_to_cache(self, year, ts):
""" Write resampled hourly ISD temperature data to cache for given year. """
return write_isd_hourly_temp_data_to_cache(self.usaf_id, year, ts)
def write_isd_daily_temp_data_to_cache(self, year, ts):
""" Write resampled daily ISD temperature data to cache for given year. """
return write_isd_daily_temp_data_to_cache(self.usaf_id, year, ts)
def write_gsod_daily_temp_data_to_cache(self, year, ts):
""" Write resampled daily GSOD temperature data to cache for given year. """
return write_gsod_daily_temp_data_to_cache(self.usaf_id, year, ts)
def write_tmy3_hourly_temp_data_to_cache(self, ts):
""" Write hourly TMY3 temperature data to cache for given year. """
return write_tmy3_hourly_temp_data_to_cache(self.usaf_id, ts)
def write_cz2010_hourly_temp_data_to_cache(self, ts):
""" Write hourly CZ2010 temperature data to cache for given year. """
return write_cz2010_hourly_temp_data_to_cache(self.usaf_id, ts)
# delete cached data for a particular year
def destroy_cached_isd_hourly_temp_data(self, year):
""" Remove cached resampled hourly ISD temperature data to cache for given year. """
return destroy_cached_isd_hourly_temp_data(self.usaf_id, year)
def destroy_cached_isd_daily_temp_data(self, year):
""" Remove cached resampled daily ISD temperature data to cache for given year. """
return destroy_cached_isd_daily_temp_data(self.usaf_id, year)
def destroy_cached_gsod_daily_temp_data(self, year):
""" Remove cached resampled daily GSOD temperature data to cache for given year. """
return destroy_cached_gsod_daily_temp_data(self.usaf_id, year)
def destroy_cached_tmy3_hourly_temp_data(self):
""" Remove cached hourly TMY3 temperature data to cache. """
return destroy_cached_tmy3_hourly_temp_data(self.usaf_id)
def destroy_cached_cz2010_hourly_temp_data(self):
""" Remove cached hourly CZ2010 temperature data to cache. """
return destroy_cached_cz2010_hourly_temp_data(self.usaf_id)
# load data either from cache if valid or directly from source
def load_isd_hourly_temp_data_cached_proxy(self, year):
""" Load resampled hourly ISD temperature data from cache, or if it is expired or hadn't been cached, fetch from FTP for given year. """
return load_isd_hourly_temp_data_cached_proxy(self.usaf_id, year)
def load_isd_daily_temp_data_cached_proxy(self, year):
""" Load resampled daily ISD temperature data from cache, or if it is expired or hadn't been cached, fetch from FTP for given year. """
return load_isd_daily_temp_data_cached_proxy(self.usaf_id, year)
def load_gsod_daily_temp_data_cached_proxy(self, year):
""" Load resampled daily GSOD temperature data from cache, or if it is expired or hadn't been cached, fetch from FTP for given year. """
return load_gsod_daily_temp_data_cached_proxy(self.usaf_id, year)
def load_tmy3_hourly_temp_data_cached_proxy(self):
""" Load hourly TMY3 temperature data from cache, or if it is expired or hadn't been cached, fetch from NREL. """
return load_tmy3_hourly_temp_data_cached_proxy(self.usaf_id)
def load_cz2010_hourly_temp_data_cached_proxy(self):
""" Load hourly CZ2010 temperature data from cache, or if it is expired or hadn't been cached, fetch from URL. """
return load_cz2010_hourly_temp_data_cached_proxy(self.usaf_id)
# main interface: load data from start date to end date
def load_isd_daily_temp_data(
self, start, end, read_from_cache=True, write_to_cache=True
):
""" Load resampled daily ISD temperature data from start date to end date (inclusive).
This is the primary convenience method for loading resampled daily ISD temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
"""
return load_isd_daily_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
)
def load_gsod_daily_temp_data(
self, start, end, read_from_cache=True, write_to_cache=True
):
""" Load resampled daily GSOD temperature data from start date to end date (inclusive).
This is the primary convenience method for loading resampled daily GSOD temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
"""
return load_gsod_daily_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
)
def load_tmy3_hourly_temp_data(
self, start, end, read_from_cache=True, write_to_cache=True
):
""" Load hourly TMY3 temperature data from start date to end date (inclusive).
This is the primary convenience method for loading hourly TMY3 temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
"""
return load_tmy3_hourly_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
)
def load_cz2010_hourly_temp_data(
self, start, end, read_from_cache=True, write_to_cache=True
):
""" Load hourly CZ2010 temperature data from start date to end date (inclusive).
This is the primary convenience method for loading hourly CZ2010 temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
"""
return load_cz2010_hourly_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
)
# load all cached data for this station
def load_cached_isd_hourly_temp_data(self):
""" Load all cached resampled hourly ISD temperature data. """
return load_cached_isd_hourly_temp_data(self.usaf_id)
def load_cached_isd_daily_temp_data(self):
""" Load all cached resampled daily ISD temperature data. """
return load_cached_isd_daily_temp_data(self.usaf_id)
def load_cached_gsod_daily_temp_data(self):
""" Load all cached resampled daily GSOD temperature data. """
return load_cached_gsod_daily_temp_data(self.usaf_id)
def load_cached_tmy3_hourly_temp_data(self):
""" Load all cached hourly TMY3 temperature data (the year is set to 1900) """
return load_cached_tmy3_hourly_temp_data(self.usaf_id)
def load_cached_cz2010_hourly_temp_data(self):
""" Load all cached hourly TMY3 temperature data (the year is set to 1900) """
return load_cached_cz2010_hourly_temp_data(self.usaf_id)
|
openeemeter/eeweather | eeweather/stations.py | ISDStation.load_isd_daily_temp_data | python | def load_isd_daily_temp_data(
self, start, end, read_from_cache=True, write_to_cache=True
):
return load_isd_daily_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
) | Load resampled daily ISD temperature data from start date to end date (inclusive).
This is the primary convenience method for loading resampled daily ISD temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache. | train | https://github.com/openeemeter/eeweather/blob/d32b7369b26edfa3ee431c60457afeb0593123a7/eeweather/stations.py#L1426-L1450 | [
"def load_isd_daily_temp_data(\n usaf_id, start, end, read_from_cache=True, write_to_cache=True\n):\n\n # CalTRACK 2.3.3\n if start.tzinfo != pytz.UTC:\n raise NonUTCTimezoneInfoError(start)\n if end.tzinfo != pytz.UTC:\n raise NonUTCTimezoneInfoError(start)\n data = [\n load_isd... | class ISDStation(object):
""" A representation of an Integrated Surface Database weather station.
Contains data about a particular ISD station, as well as methods to pull
data for this station.
Parameters
----------
usaf_id : str
ISD station USAF ID
load_metatdata : bool, optional
Whether or not to auto-load metadata for this station
Attributes
----------
usaf_id : str
ISD station USAF ID
iecc_climate_zone : str
IECC Climate Zone
iecc_moisture_regime : str
IECC Moisture Regime
ba_climate_zone : str
Building America Climate Zone
ca_climate_zone : str
California Building Climate Zone
elevation : float
elevation of station
latitude : float
latitude of station
longitude : float
longitude of station
coords : tuple of (float, float)
lat/long coordinates of station
name : str
name of the station
quality : str
"high", "medium", "low"
wban_ids : list of str
list of WBAN IDs, or "99999" which have been used to identify the station.
recent_wban_id = None
WBAN ID most recently used to identify the station.
climate_zones = {}
dict of all climate zones.
"""
def __init__(self, usaf_id, load_metadata=True):
self.usaf_id = usaf_id
if load_metadata:
self._load_metadata()
else:
valid_usaf_id_or_raise(usaf_id)
self.iecc_climate_zone = None
self.iecc_moisture_regime = None
self.ba_climate_zone = None
self.ca_climate_zone = None
self.elevation = None
self.latitude = None
self.longitude = None
self.coords = None
self.name = None
self.quality = None
self.wban_ids = None
self.recent_wban_id = None
self.climate_zones = {}
def __str__(self):
return self.usaf_id
def __repr__(self):
return "ISDStation('{}')".format(self.usaf_id)
def _load_metadata(self):
metadata = get_isd_station_metadata(self.usaf_id)
def _float_or_none(field):
value = metadata.get(field)
return None if value is None else float(value)
self.iecc_climate_zone = metadata.get("iecc_climate_zone")
self.iecc_moisture_regime = metadata.get("iecc_moisture_regime")
self.ba_climate_zone = metadata.get("ba_climate_zone")
self.ca_climate_zone = metadata.get("ca_climate_zone")
self.icao_code = metadata.get("icao_code")
self.elevation = _float_or_none("elevation") # meters
self.latitude = _float_or_none("latitude")
self.longitude = _float_or_none("longitude")
self.coords = (self.latitude, self.longitude)
self.name = metadata.get("name")
self.quality = metadata.get("quality")
self.wban_ids = metadata.get("wban_ids", "").split(",")
self.recent_wban_id = metadata.get("recent_wban_id")
self.climate_zones = {
"iecc_climate_zone": metadata.get("iecc_climate_zone"),
"iecc_moisture_regime": metadata.get("iecc_moisture_regime"),
"ba_climate_zone": metadata.get("ba_climate_zone"),
"ca_climate_zone": metadata.get("ca_climate_zone"),
}
def json(self):
""" Return a JSON-serializeable object containing station metadata."""
return {
"elevation": self.elevation,
"latitude": self.latitude,
"longitude": self.longitude,
"icao_code": self.icao_code,
"name": self.name,
"quality": self.quality,
"wban_ids": self.wban_ids,
"recent_wban_id": self.recent_wban_id,
"climate_zones": {
"iecc_climate_zone": self.iecc_climate_zone,
"iecc_moisture_regime": self.iecc_moisture_regime,
"ba_climate_zone": self.ba_climate_zone,
"ca_climate_zone": self.ca_climate_zone,
},
}
def get_isd_filenames(self, year=None, with_host=False):
""" Get filenames of raw ISD station data. """
return get_isd_filenames(self.usaf_id, year, with_host=with_host)
def get_gsod_filenames(self, year=None, with_host=False):
""" Get filenames of raw GSOD station data. """
return get_gsod_filenames(self.usaf_id, year, with_host=with_host)
def get_isd_file_metadata(self):
""" Get raw file metadata for the station. """
return get_isd_file_metadata(self.usaf_id)
# fetch raw data
def fetch_isd_raw_temp_data(self, year):
""" Pull raw ISD data for the given year directly from FTP. """
return fetch_isd_raw_temp_data(self.usaf_id, year)
def fetch_gsod_raw_temp_data(self, year):
""" Pull raw GSOD data for the given year directly from FTP. """
return fetch_gsod_raw_temp_data(self.usaf_id, year)
# fetch raw data then frequency-normalize
def fetch_isd_hourly_temp_data(self, year):
""" Pull raw ISD temperature data for the given year directly from FTP and resample to hourly time series. """
return fetch_isd_hourly_temp_data(self.usaf_id, year)
def fetch_isd_daily_temp_data(self, year):
""" Pull raw ISD temperature data for the given year directly from FTP and resample to daily time series. """
return fetch_isd_daily_temp_data(self.usaf_id, year)
def fetch_gsod_daily_temp_data(self, year):
""" Pull raw GSOD temperature data for the given year directly from FTP and resample to daily time series. """
return fetch_gsod_daily_temp_data(self.usaf_id, year)
def fetch_tmy3_hourly_temp_data(self):
""" Pull hourly TMY3 temperature hourly time series directly from NREL. """
return fetch_tmy3_hourly_temp_data(self.usaf_id)
def fetch_cz2010_hourly_temp_data(self):
""" Pull hourly CZ2010 temperature hourly time series from URL. """
return fetch_cz2010_hourly_temp_data(self.usaf_id)
# get key-value store key
def get_isd_hourly_temp_data_cache_key(self, year):
""" Get key used to cache resampled hourly ISD temperature data for the given year. """
return get_isd_hourly_temp_data_cache_key(self.usaf_id, year)
def get_isd_daily_temp_data_cache_key(self, year):
""" Get key used to cache resampled daily ISD temperature data for the given year. """
return get_isd_daily_temp_data_cache_key(self.usaf_id, year)
def get_gsod_daily_temp_data_cache_key(self, year):
""" Get key used to cache resampled daily GSOD temperature data for the given year. """
return get_gsod_daily_temp_data_cache_key(self.usaf_id, year)
def get_tmy3_hourly_temp_data_cache_key(self):
""" Get key used to cache TMY3 weather-normalized temperature data. """
return get_tmy3_hourly_temp_data_cache_key(self.usaf_id)
def get_cz2010_hourly_temp_data_cache_key(self):
""" Get key used to cache CZ2010 weather-normalized temperature data. """
return get_cz2010_hourly_temp_data_cache_key(self.usaf_id)
# is cached data expired? boolean. true if expired or not in cache
def cached_isd_hourly_temp_data_is_expired(self, year):
""" Return True if cache of resampled hourly ISD temperature data has expired or does not exist for the given year. """
return cached_isd_hourly_temp_data_is_expired(self.usaf_id, year)
def cached_isd_daily_temp_data_is_expired(self, year):
""" Return True if cache of resampled daily ISD temperature data has expired or does not exist for the given year. """
return cached_isd_daily_temp_data_is_expired(self.usaf_id, year)
def cached_gsod_daily_temp_data_is_expired(self, year):
""" Return True if cache of resampled daily GSOD temperature data has expired or does not exist for the given year. """
return cached_gsod_daily_temp_data_is_expired(self.usaf_id, year)
# check if data is available and delete data in the cache if it's expired
def validate_isd_hourly_temp_data_cache(self, year):
""" Delete cached resampled hourly ISD temperature data if it has expired for the given year. """
return validate_isd_hourly_temp_data_cache(self.usaf_id, year)
def validate_isd_daily_temp_data_cache(self, year):
""" Delete cached resampled daily ISD temperature data if it has expired for the given year. """
return validate_isd_daily_temp_data_cache(self.usaf_id, year)
def validate_gsod_daily_temp_data_cache(self, year):
""" Delete cached resampled daily GSOD temperature data if it has expired for the given year. """
return validate_gsod_daily_temp_data_cache(self.usaf_id, year)
def validate_tmy3_hourly_temp_data_cache(self):
""" Check if TMY3 data exists in cache. """
return validate_tmy3_hourly_temp_data_cache(self.usaf_id)
def validate_cz2010_hourly_temp_data_cache(self):
""" Check if CZ2010 data exists in cache. """
return validate_cz2010_hourly_temp_data_cache(self.usaf_id)
# pandas time series to json
def serialize_isd_hourly_temp_data(self, ts):
""" Serialize resampled hourly ISD pandas time series as JSON for caching. """
return serialize_isd_hourly_temp_data(ts)
def serialize_isd_daily_temp_data(self, ts):
""" Serialize resampled daily ISD pandas time series as JSON for caching. """
return serialize_isd_daily_temp_data(ts)
def serialize_gsod_daily_temp_data(self, ts):
""" Serialize resampled daily GSOD pandas time series as JSON for caching. """
return serialize_gsod_daily_temp_data(ts)
def serialize_tmy3_hourly_temp_data(self, ts):
""" Serialize hourly TMY3 pandas time series as JSON for caching. """
return serialize_tmy3_hourly_temp_data(ts)
def serialize_cz2010_hourly_temp_data(self, ts):
""" Serialize hourly CZ2010 pandas time series as JSON for caching. """
return serialize_cz2010_hourly_temp_data(ts)
# json to pandas time series
def deserialize_isd_hourly_temp_data(self, data):
""" Deserialize JSON representation of resampled hourly ISD into pandas time series. """
return deserialize_isd_hourly_temp_data(data)
def deserialize_isd_daily_temp_data(self, data):
""" Deserialize JSON representation of resampled daily ISD into pandas time series. """
return deserialize_isd_daily_temp_data(data)
def deserialize_gsod_daily_temp_data(self, data):
""" Deserialize JSON representation of resampled daily GSOD into pandas time series. """
return deserialize_gsod_daily_temp_data(data)
def deserialize_tmy3_hourly_temp_data(self, data):
""" Deserialize JSON representation of hourly TMY3 into pandas time series. """
return deserialize_isd_hourly_temp_data(data)
def deserialize_cz2010_hourly_temp_data(self, data):
""" Deserialize JSON representation of hourly CZ2010 into pandas time series. """
return deserialize_cz2010_hourly_temp_data(data)
# return pandas time series of data from cache
def read_isd_hourly_temp_data_from_cache(self, year):
""" Get cached version of resampled hourly ISD temperature data for given year. """
return read_isd_hourly_temp_data_from_cache(self.usaf_id, year)
def read_isd_daily_temp_data_from_cache(self, year):
""" Get cached version of resampled daily ISD temperature data for given year. """
return read_isd_daily_temp_data_from_cache(self.usaf_id, year)
def read_gsod_daily_temp_data_from_cache(self, year):
""" Get cached version of resampled daily GSOD temperature data for given year. """
return read_gsod_daily_temp_data_from_cache(self.usaf_id, year)
def read_tmy3_hourly_temp_data_from_cache(self):
""" Get cached version of hourly TMY3 temperature data. """
return read_tmy3_hourly_temp_data_from_cache(self.usaf_id)
def read_cz2010_hourly_temp_data_from_cache(self):
""" Get cached version of hourly TMY3 temperature data. """
return read_cz2010_hourly_temp_data_from_cache(self.usaf_id)
# write pandas time series of data to cache for a particular year
def write_isd_hourly_temp_data_to_cache(self, year, ts):
""" Write resampled hourly ISD temperature data to cache for given year. """
return write_isd_hourly_temp_data_to_cache(self.usaf_id, year, ts)
def write_isd_daily_temp_data_to_cache(self, year, ts):
""" Write resampled daily ISD temperature data to cache for given year. """
return write_isd_daily_temp_data_to_cache(self.usaf_id, year, ts)
def write_gsod_daily_temp_data_to_cache(self, year, ts):
""" Write resampled daily GSOD temperature data to cache for given year. """
return write_gsod_daily_temp_data_to_cache(self.usaf_id, year, ts)
def write_tmy3_hourly_temp_data_to_cache(self, ts):
""" Write hourly TMY3 temperature data to cache for given year. """
return write_tmy3_hourly_temp_data_to_cache(self.usaf_id, ts)
def write_cz2010_hourly_temp_data_to_cache(self, ts):
""" Write hourly CZ2010 temperature data to cache for given year. """
return write_cz2010_hourly_temp_data_to_cache(self.usaf_id, ts)
# delete cached data for a particular year
def destroy_cached_isd_hourly_temp_data(self, year):
""" Remove cached resampled hourly ISD temperature data to cache for given year. """
return destroy_cached_isd_hourly_temp_data(self.usaf_id, year)
def destroy_cached_isd_daily_temp_data(self, year):
""" Remove cached resampled daily ISD temperature data to cache for given year. """
return destroy_cached_isd_daily_temp_data(self.usaf_id, year)
def destroy_cached_gsod_daily_temp_data(self, year):
""" Remove cached resampled daily GSOD temperature data to cache for given year. """
return destroy_cached_gsod_daily_temp_data(self.usaf_id, year)
def destroy_cached_tmy3_hourly_temp_data(self):
""" Remove cached hourly TMY3 temperature data to cache. """
return destroy_cached_tmy3_hourly_temp_data(self.usaf_id)
def destroy_cached_cz2010_hourly_temp_data(self):
""" Remove cached hourly CZ2010 temperature data to cache. """
return destroy_cached_cz2010_hourly_temp_data(self.usaf_id)
# load data either from cache if valid or directly from source
def load_isd_hourly_temp_data_cached_proxy(self, year):
""" Load resampled hourly ISD temperature data from cache, or if it is expired or hadn't been cached, fetch from FTP for given year. """
return load_isd_hourly_temp_data_cached_proxy(self.usaf_id, year)
def load_isd_daily_temp_data_cached_proxy(self, year):
""" Load resampled daily ISD temperature data from cache, or if it is expired or hadn't been cached, fetch from FTP for given year. """
return load_isd_daily_temp_data_cached_proxy(self.usaf_id, year)
def load_gsod_daily_temp_data_cached_proxy(self, year):
""" Load resampled daily GSOD temperature data from cache, or if it is expired or hadn't been cached, fetch from FTP for given year. """
return load_gsod_daily_temp_data_cached_proxy(self.usaf_id, year)
def load_tmy3_hourly_temp_data_cached_proxy(self):
""" Load hourly TMY3 temperature data from cache, or if it is expired or hadn't been cached, fetch from NREL. """
return load_tmy3_hourly_temp_data_cached_proxy(self.usaf_id)
def load_cz2010_hourly_temp_data_cached_proxy(self):
""" Load hourly CZ2010 temperature data from cache, or if it is expired or hadn't been cached, fetch from URL. """
return load_cz2010_hourly_temp_data_cached_proxy(self.usaf_id)
# main interface: load data from start date to end date
def load_isd_hourly_temp_data(
self,
start,
end,
read_from_cache=True,
write_to_cache=True,
error_on_missing_years=True,
):
""" Load resampled hourly ISD temperature data from start date to end date (inclusive).
This is the primary convenience method for loading resampled hourly ISD temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
"""
return load_isd_hourly_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
error_on_missing_years=error_on_missing_years,
)
def load_gsod_daily_temp_data(
self, start, end, read_from_cache=True, write_to_cache=True
):
""" Load resampled daily GSOD temperature data from start date to end date (inclusive).
This is the primary convenience method for loading resampled daily GSOD temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
"""
return load_gsod_daily_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
)
def load_tmy3_hourly_temp_data(
self, start, end, read_from_cache=True, write_to_cache=True
):
""" Load hourly TMY3 temperature data from start date to end date (inclusive).
This is the primary convenience method for loading hourly TMY3 temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
"""
return load_tmy3_hourly_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
)
def load_cz2010_hourly_temp_data(
self, start, end, read_from_cache=True, write_to_cache=True
):
""" Load hourly CZ2010 temperature data from start date to end date (inclusive).
This is the primary convenience method for loading hourly CZ2010 temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
"""
return load_cz2010_hourly_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
)
# load all cached data for this station
def load_cached_isd_hourly_temp_data(self):
""" Load all cached resampled hourly ISD temperature data. """
return load_cached_isd_hourly_temp_data(self.usaf_id)
def load_cached_isd_daily_temp_data(self):
""" Load all cached resampled daily ISD temperature data. """
return load_cached_isd_daily_temp_data(self.usaf_id)
def load_cached_gsod_daily_temp_data(self):
""" Load all cached resampled daily GSOD temperature data. """
return load_cached_gsod_daily_temp_data(self.usaf_id)
def load_cached_tmy3_hourly_temp_data(self):
""" Load all cached hourly TMY3 temperature data (the year is set to 1900) """
return load_cached_tmy3_hourly_temp_data(self.usaf_id)
def load_cached_cz2010_hourly_temp_data(self):
""" Load all cached hourly TMY3 temperature data (the year is set to 1900) """
return load_cached_cz2010_hourly_temp_data(self.usaf_id)
|
openeemeter/eeweather | eeweather/stations.py | ISDStation.load_gsod_daily_temp_data | python | def load_gsod_daily_temp_data(
self, start, end, read_from_cache=True, write_to_cache=True
):
return load_gsod_daily_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
) | Load resampled daily GSOD temperature data from start date to end date (inclusive).
This is the primary convenience method for loading resampled daily GSOD temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache. | train | https://github.com/openeemeter/eeweather/blob/d32b7369b26edfa3ee431c60457afeb0593123a7/eeweather/stations.py#L1452-L1476 | [
"def load_gsod_daily_temp_data(\n usaf_id, start, end, read_from_cache=True, write_to_cache=True\n):\n\n # CalTRACK 2.3.3\n if start.tzinfo != pytz.UTC:\n raise NonUTCTimezoneInfoError(start)\n if end.tzinfo != pytz.UTC:\n raise NonUTCTimezoneInfoError(start)\n data = [\n load_gs... | class ISDStation(object):
""" A representation of an Integrated Surface Database weather station.
Contains data about a particular ISD station, as well as methods to pull
data for this station.
Parameters
----------
usaf_id : str
ISD station USAF ID
load_metatdata : bool, optional
Whether or not to auto-load metadata for this station
Attributes
----------
usaf_id : str
ISD station USAF ID
iecc_climate_zone : str
IECC Climate Zone
iecc_moisture_regime : str
IECC Moisture Regime
ba_climate_zone : str
Building America Climate Zone
ca_climate_zone : str
California Building Climate Zone
elevation : float
elevation of station
latitude : float
latitude of station
longitude : float
longitude of station
coords : tuple of (float, float)
lat/long coordinates of station
name : str
name of the station
quality : str
"high", "medium", "low"
wban_ids : list of str
list of WBAN IDs, or "99999" which have been used to identify the station.
recent_wban_id = None
WBAN ID most recently used to identify the station.
climate_zones = {}
dict of all climate zones.
"""
def __init__(self, usaf_id, load_metadata=True):
self.usaf_id = usaf_id
if load_metadata:
self._load_metadata()
else:
valid_usaf_id_or_raise(usaf_id)
self.iecc_climate_zone = None
self.iecc_moisture_regime = None
self.ba_climate_zone = None
self.ca_climate_zone = None
self.elevation = None
self.latitude = None
self.longitude = None
self.coords = None
self.name = None
self.quality = None
self.wban_ids = None
self.recent_wban_id = None
self.climate_zones = {}
def __str__(self):
return self.usaf_id
def __repr__(self):
return "ISDStation('{}')".format(self.usaf_id)
def _load_metadata(self):
metadata = get_isd_station_metadata(self.usaf_id)
def _float_or_none(field):
value = metadata.get(field)
return None if value is None else float(value)
self.iecc_climate_zone = metadata.get("iecc_climate_zone")
self.iecc_moisture_regime = metadata.get("iecc_moisture_regime")
self.ba_climate_zone = metadata.get("ba_climate_zone")
self.ca_climate_zone = metadata.get("ca_climate_zone")
self.icao_code = metadata.get("icao_code")
self.elevation = _float_or_none("elevation") # meters
self.latitude = _float_or_none("latitude")
self.longitude = _float_or_none("longitude")
self.coords = (self.latitude, self.longitude)
self.name = metadata.get("name")
self.quality = metadata.get("quality")
self.wban_ids = metadata.get("wban_ids", "").split(",")
self.recent_wban_id = metadata.get("recent_wban_id")
self.climate_zones = {
"iecc_climate_zone": metadata.get("iecc_climate_zone"),
"iecc_moisture_regime": metadata.get("iecc_moisture_regime"),
"ba_climate_zone": metadata.get("ba_climate_zone"),
"ca_climate_zone": metadata.get("ca_climate_zone"),
}
def json(self):
""" Return a JSON-serializeable object containing station metadata."""
return {
"elevation": self.elevation,
"latitude": self.latitude,
"longitude": self.longitude,
"icao_code": self.icao_code,
"name": self.name,
"quality": self.quality,
"wban_ids": self.wban_ids,
"recent_wban_id": self.recent_wban_id,
"climate_zones": {
"iecc_climate_zone": self.iecc_climate_zone,
"iecc_moisture_regime": self.iecc_moisture_regime,
"ba_climate_zone": self.ba_climate_zone,
"ca_climate_zone": self.ca_climate_zone,
},
}
def get_isd_filenames(self, year=None, with_host=False):
""" Get filenames of raw ISD station data. """
return get_isd_filenames(self.usaf_id, year, with_host=with_host)
def get_gsod_filenames(self, year=None, with_host=False):
""" Get filenames of raw GSOD station data. """
return get_gsod_filenames(self.usaf_id, year, with_host=with_host)
def get_isd_file_metadata(self):
""" Get raw file metadata for the station. """
return get_isd_file_metadata(self.usaf_id)
# fetch raw data
def fetch_isd_raw_temp_data(self, year):
""" Pull raw ISD data for the given year directly from FTP. """
return fetch_isd_raw_temp_data(self.usaf_id, year)
def fetch_gsod_raw_temp_data(self, year):
""" Pull raw GSOD data for the given year directly from FTP. """
return fetch_gsod_raw_temp_data(self.usaf_id, year)
# fetch raw data then frequency-normalize
def fetch_isd_hourly_temp_data(self, year):
""" Pull raw ISD temperature data for the given year directly from FTP and resample to hourly time series. """
return fetch_isd_hourly_temp_data(self.usaf_id, year)
def fetch_isd_daily_temp_data(self, year):
""" Pull raw ISD temperature data for the given year directly from FTP and resample to daily time series. """
return fetch_isd_daily_temp_data(self.usaf_id, year)
def fetch_gsod_daily_temp_data(self, year):
""" Pull raw GSOD temperature data for the given year directly from FTP and resample to daily time series. """
return fetch_gsod_daily_temp_data(self.usaf_id, year)
def fetch_tmy3_hourly_temp_data(self):
""" Pull hourly TMY3 temperature hourly time series directly from NREL. """
return fetch_tmy3_hourly_temp_data(self.usaf_id)
def fetch_cz2010_hourly_temp_data(self):
""" Pull hourly CZ2010 temperature hourly time series from URL. """
return fetch_cz2010_hourly_temp_data(self.usaf_id)
# get key-value store key
def get_isd_hourly_temp_data_cache_key(self, year):
""" Get key used to cache resampled hourly ISD temperature data for the given year. """
return get_isd_hourly_temp_data_cache_key(self.usaf_id, year)
def get_isd_daily_temp_data_cache_key(self, year):
""" Get key used to cache resampled daily ISD temperature data for the given year. """
return get_isd_daily_temp_data_cache_key(self.usaf_id, year)
def get_gsod_daily_temp_data_cache_key(self, year):
""" Get key used to cache resampled daily GSOD temperature data for the given year. """
return get_gsod_daily_temp_data_cache_key(self.usaf_id, year)
def get_tmy3_hourly_temp_data_cache_key(self):
""" Get key used to cache TMY3 weather-normalized temperature data. """
return get_tmy3_hourly_temp_data_cache_key(self.usaf_id)
def get_cz2010_hourly_temp_data_cache_key(self):
""" Get key used to cache CZ2010 weather-normalized temperature data. """
return get_cz2010_hourly_temp_data_cache_key(self.usaf_id)
# is cached data expired? boolean. true if expired or not in cache
def cached_isd_hourly_temp_data_is_expired(self, year):
""" Return True if cache of resampled hourly ISD temperature data has expired or does not exist for the given year. """
return cached_isd_hourly_temp_data_is_expired(self.usaf_id, year)
def cached_isd_daily_temp_data_is_expired(self, year):
""" Return True if cache of resampled daily ISD temperature data has expired or does not exist for the given year. """
return cached_isd_daily_temp_data_is_expired(self.usaf_id, year)
def cached_gsod_daily_temp_data_is_expired(self, year):
""" Return True if cache of resampled daily GSOD temperature data has expired or does not exist for the given year. """
return cached_gsod_daily_temp_data_is_expired(self.usaf_id, year)
# check if data is available and delete data in the cache if it's expired
def validate_isd_hourly_temp_data_cache(self, year):
""" Delete cached resampled hourly ISD temperature data if it has expired for the given year. """
return validate_isd_hourly_temp_data_cache(self.usaf_id, year)
def validate_isd_daily_temp_data_cache(self, year):
""" Delete cached resampled daily ISD temperature data if it has expired for the given year. """
return validate_isd_daily_temp_data_cache(self.usaf_id, year)
def validate_gsod_daily_temp_data_cache(self, year):
""" Delete cached resampled daily GSOD temperature data if it has expired for the given year. """
return validate_gsod_daily_temp_data_cache(self.usaf_id, year)
def validate_tmy3_hourly_temp_data_cache(self):
""" Check if TMY3 data exists in cache. """
return validate_tmy3_hourly_temp_data_cache(self.usaf_id)
def validate_cz2010_hourly_temp_data_cache(self):
""" Check if CZ2010 data exists in cache. """
return validate_cz2010_hourly_temp_data_cache(self.usaf_id)
# pandas time series to json
def serialize_isd_hourly_temp_data(self, ts):
""" Serialize resampled hourly ISD pandas time series as JSON for caching. """
return serialize_isd_hourly_temp_data(ts)
def serialize_isd_daily_temp_data(self, ts):
""" Serialize resampled daily ISD pandas time series as JSON for caching. """
return serialize_isd_daily_temp_data(ts)
def serialize_gsod_daily_temp_data(self, ts):
""" Serialize resampled daily GSOD pandas time series as JSON for caching. """
return serialize_gsod_daily_temp_data(ts)
def serialize_tmy3_hourly_temp_data(self, ts):
""" Serialize hourly TMY3 pandas time series as JSON for caching. """
return serialize_tmy3_hourly_temp_data(ts)
def serialize_cz2010_hourly_temp_data(self, ts):
""" Serialize hourly CZ2010 pandas time series as JSON for caching. """
return serialize_cz2010_hourly_temp_data(ts)
# json to pandas time series
def deserialize_isd_hourly_temp_data(self, data):
""" Deserialize JSON representation of resampled hourly ISD into pandas time series. """
return deserialize_isd_hourly_temp_data(data)
def deserialize_isd_daily_temp_data(self, data):
""" Deserialize JSON representation of resampled daily ISD into pandas time series. """
return deserialize_isd_daily_temp_data(data)
def deserialize_gsod_daily_temp_data(self, data):
""" Deserialize JSON representation of resampled daily GSOD into pandas time series. """
return deserialize_gsod_daily_temp_data(data)
def deserialize_tmy3_hourly_temp_data(self, data):
""" Deserialize JSON representation of hourly TMY3 into pandas time series. """
return deserialize_isd_hourly_temp_data(data)
def deserialize_cz2010_hourly_temp_data(self, data):
""" Deserialize JSON representation of hourly CZ2010 into pandas time series. """
return deserialize_cz2010_hourly_temp_data(data)
# return pandas time series of data from cache
def read_isd_hourly_temp_data_from_cache(self, year):
""" Get cached version of resampled hourly ISD temperature data for given year. """
return read_isd_hourly_temp_data_from_cache(self.usaf_id, year)
def read_isd_daily_temp_data_from_cache(self, year):
""" Get cached version of resampled daily ISD temperature data for given year. """
return read_isd_daily_temp_data_from_cache(self.usaf_id, year)
def read_gsod_daily_temp_data_from_cache(self, year):
""" Get cached version of resampled daily GSOD temperature data for given year. """
return read_gsod_daily_temp_data_from_cache(self.usaf_id, year)
def read_tmy3_hourly_temp_data_from_cache(self):
""" Get cached version of hourly TMY3 temperature data. """
return read_tmy3_hourly_temp_data_from_cache(self.usaf_id)
def read_cz2010_hourly_temp_data_from_cache(self):
""" Get cached version of hourly TMY3 temperature data. """
return read_cz2010_hourly_temp_data_from_cache(self.usaf_id)
# write pandas time series of data to cache for a particular year
def write_isd_hourly_temp_data_to_cache(self, year, ts):
""" Write resampled hourly ISD temperature data to cache for given year. """
return write_isd_hourly_temp_data_to_cache(self.usaf_id, year, ts)
def write_isd_daily_temp_data_to_cache(self, year, ts):
""" Write resampled daily ISD temperature data to cache for given year. """
return write_isd_daily_temp_data_to_cache(self.usaf_id, year, ts)
def write_gsod_daily_temp_data_to_cache(self, year, ts):
""" Write resampled daily GSOD temperature data to cache for given year. """
return write_gsod_daily_temp_data_to_cache(self.usaf_id, year, ts)
def write_tmy3_hourly_temp_data_to_cache(self, ts):
""" Write hourly TMY3 temperature data to cache for given year. """
return write_tmy3_hourly_temp_data_to_cache(self.usaf_id, ts)
def write_cz2010_hourly_temp_data_to_cache(self, ts):
""" Write hourly CZ2010 temperature data to cache for given year. """
return write_cz2010_hourly_temp_data_to_cache(self.usaf_id, ts)
# delete cached data for a particular year
def destroy_cached_isd_hourly_temp_data(self, year):
""" Remove cached resampled hourly ISD temperature data to cache for given year. """
return destroy_cached_isd_hourly_temp_data(self.usaf_id, year)
def destroy_cached_isd_daily_temp_data(self, year):
""" Remove cached resampled daily ISD temperature data to cache for given year. """
return destroy_cached_isd_daily_temp_data(self.usaf_id, year)
def destroy_cached_gsod_daily_temp_data(self, year):
""" Remove cached resampled daily GSOD temperature data to cache for given year. """
return destroy_cached_gsod_daily_temp_data(self.usaf_id, year)
def destroy_cached_tmy3_hourly_temp_data(self):
""" Remove cached hourly TMY3 temperature data to cache. """
return destroy_cached_tmy3_hourly_temp_data(self.usaf_id)
def destroy_cached_cz2010_hourly_temp_data(self):
""" Remove cached hourly CZ2010 temperature data to cache. """
return destroy_cached_cz2010_hourly_temp_data(self.usaf_id)
# load data either from cache if valid or directly from source
def load_isd_hourly_temp_data_cached_proxy(self, year):
""" Load resampled hourly ISD temperature data from cache, or if it is expired or hadn't been cached, fetch from FTP for given year. """
return load_isd_hourly_temp_data_cached_proxy(self.usaf_id, year)
def load_isd_daily_temp_data_cached_proxy(self, year):
""" Load resampled daily ISD temperature data from cache, or if it is expired or hadn't been cached, fetch from FTP for given year. """
return load_isd_daily_temp_data_cached_proxy(self.usaf_id, year)
def load_gsod_daily_temp_data_cached_proxy(self, year):
""" Load resampled daily GSOD temperature data from cache, or if it is expired or hadn't been cached, fetch from FTP for given year. """
return load_gsod_daily_temp_data_cached_proxy(self.usaf_id, year)
def load_tmy3_hourly_temp_data_cached_proxy(self):
""" Load hourly TMY3 temperature data from cache, or if it is expired or hadn't been cached, fetch from NREL. """
return load_tmy3_hourly_temp_data_cached_proxy(self.usaf_id)
def load_cz2010_hourly_temp_data_cached_proxy(self):
""" Load hourly CZ2010 temperature data from cache, or if it is expired or hadn't been cached, fetch from URL. """
return load_cz2010_hourly_temp_data_cached_proxy(self.usaf_id)
# main interface: load data from start date to end date
def load_isd_hourly_temp_data(
self,
start,
end,
read_from_cache=True,
write_to_cache=True,
error_on_missing_years=True,
):
""" Load resampled hourly ISD temperature data from start date to end date (inclusive).
This is the primary convenience method for loading resampled hourly ISD temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
"""
return load_isd_hourly_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
error_on_missing_years=error_on_missing_years,
)
def load_isd_daily_temp_data(
self, start, end, read_from_cache=True, write_to_cache=True
):
""" Load resampled daily ISD temperature data from start date to end date (inclusive).
This is the primary convenience method for loading resampled daily ISD temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
"""
return load_isd_daily_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
)
def load_tmy3_hourly_temp_data(
self, start, end, read_from_cache=True, write_to_cache=True
):
""" Load hourly TMY3 temperature data from start date to end date (inclusive).
This is the primary convenience method for loading hourly TMY3 temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
"""
return load_tmy3_hourly_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
)
def load_cz2010_hourly_temp_data(
self, start, end, read_from_cache=True, write_to_cache=True
):
""" Load hourly CZ2010 temperature data from start date to end date (inclusive).
This is the primary convenience method for loading hourly CZ2010 temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
"""
return load_cz2010_hourly_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
)
# load all cached data for this station
def load_cached_isd_hourly_temp_data(self):
""" Load all cached resampled hourly ISD temperature data. """
return load_cached_isd_hourly_temp_data(self.usaf_id)
def load_cached_isd_daily_temp_data(self):
""" Load all cached resampled daily ISD temperature data. """
return load_cached_isd_daily_temp_data(self.usaf_id)
def load_cached_gsod_daily_temp_data(self):
""" Load all cached resampled daily GSOD temperature data. """
return load_cached_gsod_daily_temp_data(self.usaf_id)
def load_cached_tmy3_hourly_temp_data(self):
""" Load all cached hourly TMY3 temperature data (the year is set to 1900) """
return load_cached_tmy3_hourly_temp_data(self.usaf_id)
def load_cached_cz2010_hourly_temp_data(self):
""" Load all cached hourly TMY3 temperature data (the year is set to 1900) """
return load_cached_cz2010_hourly_temp_data(self.usaf_id)
|
openeemeter/eeweather | eeweather/stations.py | ISDStation.load_tmy3_hourly_temp_data | python | def load_tmy3_hourly_temp_data(
self, start, end, read_from_cache=True, write_to_cache=True
):
return load_tmy3_hourly_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
) | Load hourly TMY3 temperature data from start date to end date (inclusive).
This is the primary convenience method for loading hourly TMY3 temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache. | train | https://github.com/openeemeter/eeweather/blob/d32b7369b26edfa3ee431c60457afeb0593123a7/eeweather/stations.py#L1478-L1502 | [
"def load_tmy3_hourly_temp_data(\n usaf_id, start, end, read_from_cache=True, write_to_cache=True\n):\n\n # CalTRACK 2.3.3\n if start.tzinfo != pytz.UTC:\n raise NonUTCTimezoneInfoError(start)\n if end.tzinfo != pytz.UTC:\n raise NonUTCTimezoneInfoError(start)\n single_year_data = load_... | class ISDStation(object):
""" A representation of an Integrated Surface Database weather station.
Contains data about a particular ISD station, as well as methods to pull
data for this station.
Parameters
----------
usaf_id : str
ISD station USAF ID
load_metatdata : bool, optional
Whether or not to auto-load metadata for this station
Attributes
----------
usaf_id : str
ISD station USAF ID
iecc_climate_zone : str
IECC Climate Zone
iecc_moisture_regime : str
IECC Moisture Regime
ba_climate_zone : str
Building America Climate Zone
ca_climate_zone : str
California Building Climate Zone
elevation : float
elevation of station
latitude : float
latitude of station
longitude : float
longitude of station
coords : tuple of (float, float)
lat/long coordinates of station
name : str
name of the station
quality : str
"high", "medium", "low"
wban_ids : list of str
list of WBAN IDs, or "99999" which have been used to identify the station.
recent_wban_id = None
WBAN ID most recently used to identify the station.
climate_zones = {}
dict of all climate zones.
"""
def __init__(self, usaf_id, load_metadata=True):
self.usaf_id = usaf_id
if load_metadata:
self._load_metadata()
else:
valid_usaf_id_or_raise(usaf_id)
self.iecc_climate_zone = None
self.iecc_moisture_regime = None
self.ba_climate_zone = None
self.ca_climate_zone = None
self.elevation = None
self.latitude = None
self.longitude = None
self.coords = None
self.name = None
self.quality = None
self.wban_ids = None
self.recent_wban_id = None
self.climate_zones = {}
def __str__(self):
return self.usaf_id
def __repr__(self):
return "ISDStation('{}')".format(self.usaf_id)
def _load_metadata(self):
metadata = get_isd_station_metadata(self.usaf_id)
def _float_or_none(field):
value = metadata.get(field)
return None if value is None else float(value)
self.iecc_climate_zone = metadata.get("iecc_climate_zone")
self.iecc_moisture_regime = metadata.get("iecc_moisture_regime")
self.ba_climate_zone = metadata.get("ba_climate_zone")
self.ca_climate_zone = metadata.get("ca_climate_zone")
self.icao_code = metadata.get("icao_code")
self.elevation = _float_or_none("elevation") # meters
self.latitude = _float_or_none("latitude")
self.longitude = _float_or_none("longitude")
self.coords = (self.latitude, self.longitude)
self.name = metadata.get("name")
self.quality = metadata.get("quality")
self.wban_ids = metadata.get("wban_ids", "").split(",")
self.recent_wban_id = metadata.get("recent_wban_id")
self.climate_zones = {
"iecc_climate_zone": metadata.get("iecc_climate_zone"),
"iecc_moisture_regime": metadata.get("iecc_moisture_regime"),
"ba_climate_zone": metadata.get("ba_climate_zone"),
"ca_climate_zone": metadata.get("ca_climate_zone"),
}
def json(self):
""" Return a JSON-serializeable object containing station metadata."""
return {
"elevation": self.elevation,
"latitude": self.latitude,
"longitude": self.longitude,
"icao_code": self.icao_code,
"name": self.name,
"quality": self.quality,
"wban_ids": self.wban_ids,
"recent_wban_id": self.recent_wban_id,
"climate_zones": {
"iecc_climate_zone": self.iecc_climate_zone,
"iecc_moisture_regime": self.iecc_moisture_regime,
"ba_climate_zone": self.ba_climate_zone,
"ca_climate_zone": self.ca_climate_zone,
},
}
def get_isd_filenames(self, year=None, with_host=False):
""" Get filenames of raw ISD station data. """
return get_isd_filenames(self.usaf_id, year, with_host=with_host)
def get_gsod_filenames(self, year=None, with_host=False):
""" Get filenames of raw GSOD station data. """
return get_gsod_filenames(self.usaf_id, year, with_host=with_host)
def get_isd_file_metadata(self):
""" Get raw file metadata for the station. """
return get_isd_file_metadata(self.usaf_id)
# fetch raw data
def fetch_isd_raw_temp_data(self, year):
""" Pull raw ISD data for the given year directly from FTP. """
return fetch_isd_raw_temp_data(self.usaf_id, year)
def fetch_gsod_raw_temp_data(self, year):
""" Pull raw GSOD data for the given year directly from FTP. """
return fetch_gsod_raw_temp_data(self.usaf_id, year)
# fetch raw data then frequency-normalize
def fetch_isd_hourly_temp_data(self, year):
""" Pull raw ISD temperature data for the given year directly from FTP and resample to hourly time series. """
return fetch_isd_hourly_temp_data(self.usaf_id, year)
def fetch_isd_daily_temp_data(self, year):
""" Pull raw ISD temperature data for the given year directly from FTP and resample to daily time series. """
return fetch_isd_daily_temp_data(self.usaf_id, year)
def fetch_gsod_daily_temp_data(self, year):
""" Pull raw GSOD temperature data for the given year directly from FTP and resample to daily time series. """
return fetch_gsod_daily_temp_data(self.usaf_id, year)
def fetch_tmy3_hourly_temp_data(self):
""" Pull hourly TMY3 temperature hourly time series directly from NREL. """
return fetch_tmy3_hourly_temp_data(self.usaf_id)
def fetch_cz2010_hourly_temp_data(self):
""" Pull hourly CZ2010 temperature hourly time series from URL. """
return fetch_cz2010_hourly_temp_data(self.usaf_id)
# get key-value store key
def get_isd_hourly_temp_data_cache_key(self, year):
""" Get key used to cache resampled hourly ISD temperature data for the given year. """
return get_isd_hourly_temp_data_cache_key(self.usaf_id, year)
def get_isd_daily_temp_data_cache_key(self, year):
""" Get key used to cache resampled daily ISD temperature data for the given year. """
return get_isd_daily_temp_data_cache_key(self.usaf_id, year)
def get_gsod_daily_temp_data_cache_key(self, year):
""" Get key used to cache resampled daily GSOD temperature data for the given year. """
return get_gsod_daily_temp_data_cache_key(self.usaf_id, year)
def get_tmy3_hourly_temp_data_cache_key(self):
""" Get key used to cache TMY3 weather-normalized temperature data. """
return get_tmy3_hourly_temp_data_cache_key(self.usaf_id)
def get_cz2010_hourly_temp_data_cache_key(self):
""" Get key used to cache CZ2010 weather-normalized temperature data. """
return get_cz2010_hourly_temp_data_cache_key(self.usaf_id)
# is cached data expired? boolean. true if expired or not in cache
def cached_isd_hourly_temp_data_is_expired(self, year):
""" Return True if cache of resampled hourly ISD temperature data has expired or does not exist for the given year. """
return cached_isd_hourly_temp_data_is_expired(self.usaf_id, year)
def cached_isd_daily_temp_data_is_expired(self, year):
""" Return True if cache of resampled daily ISD temperature data has expired or does not exist for the given year. """
return cached_isd_daily_temp_data_is_expired(self.usaf_id, year)
def cached_gsod_daily_temp_data_is_expired(self, year):
""" Return True if cache of resampled daily GSOD temperature data has expired or does not exist for the given year. """
return cached_gsod_daily_temp_data_is_expired(self.usaf_id, year)
# check if data is available and delete data in the cache if it's expired
def validate_isd_hourly_temp_data_cache(self, year):
""" Delete cached resampled hourly ISD temperature data if it has expired for the given year. """
return validate_isd_hourly_temp_data_cache(self.usaf_id, year)
def validate_isd_daily_temp_data_cache(self, year):
""" Delete cached resampled daily ISD temperature data if it has expired for the given year. """
return validate_isd_daily_temp_data_cache(self.usaf_id, year)
def validate_gsod_daily_temp_data_cache(self, year):
""" Delete cached resampled daily GSOD temperature data if it has expired for the given year. """
return validate_gsod_daily_temp_data_cache(self.usaf_id, year)
def validate_tmy3_hourly_temp_data_cache(self):
""" Check if TMY3 data exists in cache. """
return validate_tmy3_hourly_temp_data_cache(self.usaf_id)
def validate_cz2010_hourly_temp_data_cache(self):
""" Check if CZ2010 data exists in cache. """
return validate_cz2010_hourly_temp_data_cache(self.usaf_id)
# pandas time series to json
def serialize_isd_hourly_temp_data(self, ts):
""" Serialize resampled hourly ISD pandas time series as JSON for caching. """
return serialize_isd_hourly_temp_data(ts)
def serialize_isd_daily_temp_data(self, ts):
""" Serialize resampled daily ISD pandas time series as JSON for caching. """
return serialize_isd_daily_temp_data(ts)
def serialize_gsod_daily_temp_data(self, ts):
""" Serialize resampled daily GSOD pandas time series as JSON for caching. """
return serialize_gsod_daily_temp_data(ts)
def serialize_tmy3_hourly_temp_data(self, ts):
""" Serialize hourly TMY3 pandas time series as JSON for caching. """
return serialize_tmy3_hourly_temp_data(ts)
def serialize_cz2010_hourly_temp_data(self, ts):
""" Serialize hourly CZ2010 pandas time series as JSON for caching. """
return serialize_cz2010_hourly_temp_data(ts)
# json to pandas time series
def deserialize_isd_hourly_temp_data(self, data):
""" Deserialize JSON representation of resampled hourly ISD into pandas time series. """
return deserialize_isd_hourly_temp_data(data)
def deserialize_isd_daily_temp_data(self, data):
""" Deserialize JSON representation of resampled daily ISD into pandas time series. """
return deserialize_isd_daily_temp_data(data)
def deserialize_gsod_daily_temp_data(self, data):
""" Deserialize JSON representation of resampled daily GSOD into pandas time series. """
return deserialize_gsod_daily_temp_data(data)
def deserialize_tmy3_hourly_temp_data(self, data):
""" Deserialize JSON representation of hourly TMY3 into pandas time series. """
return deserialize_isd_hourly_temp_data(data)
def deserialize_cz2010_hourly_temp_data(self, data):
""" Deserialize JSON representation of hourly CZ2010 into pandas time series. """
return deserialize_cz2010_hourly_temp_data(data)
# return pandas time series of data from cache
def read_isd_hourly_temp_data_from_cache(self, year):
""" Get cached version of resampled hourly ISD temperature data for given year. """
return read_isd_hourly_temp_data_from_cache(self.usaf_id, year)
def read_isd_daily_temp_data_from_cache(self, year):
""" Get cached version of resampled daily ISD temperature data for given year. """
return read_isd_daily_temp_data_from_cache(self.usaf_id, year)
def read_gsod_daily_temp_data_from_cache(self, year):
""" Get cached version of resampled daily GSOD temperature data for given year. """
return read_gsod_daily_temp_data_from_cache(self.usaf_id, year)
def read_tmy3_hourly_temp_data_from_cache(self):
""" Get cached version of hourly TMY3 temperature data. """
return read_tmy3_hourly_temp_data_from_cache(self.usaf_id)
def read_cz2010_hourly_temp_data_from_cache(self):
""" Get cached version of hourly TMY3 temperature data. """
return read_cz2010_hourly_temp_data_from_cache(self.usaf_id)
# write pandas time series of data to cache for a particular year
def write_isd_hourly_temp_data_to_cache(self, year, ts):
""" Write resampled hourly ISD temperature data to cache for given year. """
return write_isd_hourly_temp_data_to_cache(self.usaf_id, year, ts)
def write_isd_daily_temp_data_to_cache(self, year, ts):
""" Write resampled daily ISD temperature data to cache for given year. """
return write_isd_daily_temp_data_to_cache(self.usaf_id, year, ts)
def write_gsod_daily_temp_data_to_cache(self, year, ts):
""" Write resampled daily GSOD temperature data to cache for given year. """
return write_gsod_daily_temp_data_to_cache(self.usaf_id, year, ts)
def write_tmy3_hourly_temp_data_to_cache(self, ts):
""" Write hourly TMY3 temperature data to cache for given year. """
return write_tmy3_hourly_temp_data_to_cache(self.usaf_id, ts)
def write_cz2010_hourly_temp_data_to_cache(self, ts):
""" Write hourly CZ2010 temperature data to cache for given year. """
return write_cz2010_hourly_temp_data_to_cache(self.usaf_id, ts)
# delete cached data for a particular year
def destroy_cached_isd_hourly_temp_data(self, year):
""" Remove resampled hourly ISD temperature data from the cache for the given year. """
return destroy_cached_isd_hourly_temp_data(self.usaf_id, year)
def destroy_cached_isd_daily_temp_data(self, year):
""" Remove resampled daily ISD temperature data from the cache for the given year. """
return destroy_cached_isd_daily_temp_data(self.usaf_id, year)
def destroy_cached_gsod_daily_temp_data(self, year):
""" Remove resampled daily GSOD temperature data from the cache for the given year. """
return destroy_cached_gsod_daily_temp_data(self.usaf_id, year)
def destroy_cached_tmy3_hourly_temp_data(self):
""" Remove hourly TMY3 temperature data from the cache. """
return destroy_cached_tmy3_hourly_temp_data(self.usaf_id)
def destroy_cached_cz2010_hourly_temp_data(self):
""" Remove hourly CZ2010 temperature data from the cache. """
return destroy_cached_cz2010_hourly_temp_data(self.usaf_id)
# load data either from cache if valid or directly from source
def load_isd_hourly_temp_data_cached_proxy(self, year):
""" Load resampled hourly ISD temperature data for the given year from cache; if expired or not yet cached, fetch from FTP. """
return load_isd_hourly_temp_data_cached_proxy(self.usaf_id, year)
def load_isd_daily_temp_data_cached_proxy(self, year):
""" Load resampled daily ISD temperature data for the given year from cache; if expired or not yet cached, fetch from FTP. """
return load_isd_daily_temp_data_cached_proxy(self.usaf_id, year)
def load_gsod_daily_temp_data_cached_proxy(self, year):
""" Load resampled daily GSOD temperature data for the given year from cache; if expired or not yet cached, fetch from FTP. """
return load_gsod_daily_temp_data_cached_proxy(self.usaf_id, year)
def load_tmy3_hourly_temp_data_cached_proxy(self):
""" Load hourly TMY3 temperature data from cache; if expired or not yet cached, fetch from NREL. """
return load_tmy3_hourly_temp_data_cached_proxy(self.usaf_id)
def load_cz2010_hourly_temp_data_cached_proxy(self):
""" Load hourly CZ2010 temperature data from cache; if expired or not yet cached, fetch from URL. """
return load_cz2010_hourly_temp_data_cached_proxy(self.usaf_id)
# main interface: load data from start date to end date
def load_isd_hourly_temp_data(
self,
start,
end,
read_from_cache=True,
write_to_cache=True,
error_on_missing_years=True,
):
""" Load resampled hourly ISD temperature data from start date to end date (inclusive).
This is the primary convenience method for loading resampled hourly ISD temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
error_on_missing_years : bool
Whether or not to raise when a year in the requested range cannot
be loaded (passed through to the module-level loader; TODO confirm
exact failure semantics there).
"""
return load_isd_hourly_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
error_on_missing_years=error_on_missing_years,
)
def load_isd_daily_temp_data(
self, start, end, read_from_cache=True, write_to_cache=True
):
""" Load resampled daily ISD temperature data from start date to end date (inclusive).
This is the primary convenience method for loading resampled daily ISD temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
"""
return load_isd_daily_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
)
def load_gsod_daily_temp_data(
self, start, end, read_from_cache=True, write_to_cache=True
):
""" Load resampled daily GSOD temperature data from start date to end date (inclusive).
This is the primary convenience method for loading resampled daily GSOD temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
Notes
-----
Presumably ``start`` and ``end`` must be UTC-localized datetimes, as
the sibling CZ2010 loader enforces -- TODO confirm for the GSOD loader.
"""
return load_gsod_daily_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
)
def load_cz2010_hourly_temp_data(
self, start, end, read_from_cache=True, write_to_cache=True
):
""" Load hourly CZ2010 temperature data from start date to end date (inclusive).
This is the primary convenience method for loading hourly CZ2010 temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data. Must be UTC-localized.
end : datetime.datetime
The latest date until which to load data. Must be UTC-localized.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
Raises
------
NonUTCTimezoneInfoError
If ``start`` or ``end`` is not localized to UTC (enforced by the
module-level loader).
"""
return load_cz2010_hourly_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
)
# load all cached data for this station
def load_cached_isd_hourly_temp_data(self):
""" Load all cached resampled hourly ISD temperature data. """
return load_cached_isd_hourly_temp_data(self.usaf_id)
def load_cached_isd_daily_temp_data(self):
""" Load all cached resampled daily ISD temperature data. """
return load_cached_isd_daily_temp_data(self.usaf_id)
def load_cached_gsod_daily_temp_data(self):
""" Load all cached resampled daily GSOD temperature data. """
return load_cached_gsod_daily_temp_data(self.usaf_id)
def load_cached_tmy3_hourly_temp_data(self):
""" Load all cached hourly TMY3 temperature data (the year is set to 1900). """
return load_cached_tmy3_hourly_temp_data(self.usaf_id)
def load_cached_cz2010_hourly_temp_data(self):
""" Load all cached hourly CZ2010 temperature data (the year is set to 1900). """
return load_cached_cz2010_hourly_temp_data(self.usaf_id)
|
openeemeter/eeweather | eeweather/stations.py | ISDStation.load_cz2010_hourly_temp_data | python | def load_cz2010_hourly_temp_data(
self, start, end, read_from_cache=True, write_to_cache=True
):
return load_cz2010_hourly_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
) | Load hourly CZ2010 temperature data from start date to end date (inclusive).
This is the primary convenience method for loading hourly CZ2010 temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache. | train | https://github.com/openeemeter/eeweather/blob/d32b7369b26edfa3ee431c60457afeb0593123a7/eeweather/stations.py#L1504-L1528 | [
"def load_cz2010_hourly_temp_data(\n usaf_id, start, end, read_from_cache=True, write_to_cache=True\n):\n\n # CalTRACK 2.3.3\n if start.tzinfo != pytz.UTC:\n raise NonUTCTimezoneInfoError(start)\n if end.tzinfo != pytz.UTC:\n raise NonUTCTimezoneInfoError(start)\n single_year_data = loa... | class ISDStation(object):
""" A representation of an Integrated Surface Database weather station.
Contains data about a particular ISD station, as well as methods to pull
data for this station.
Parameters
----------
usaf_id : str
ISD station USAF ID
load_metatdata : bool, optional
Whether or not to auto-load metadata for this station
Attributes
----------
usaf_id : str
ISD station USAF ID
iecc_climate_zone : str
IECC Climate Zone
iecc_moisture_regime : str
IECC Moisture Regime
ba_climate_zone : str
Building America Climate Zone
ca_climate_zone : str
California Building Climate Zone
elevation : float
elevation of station
latitude : float
latitude of station
longitude : float
longitude of station
coords : tuple of (float, float)
lat/long coordinates of station
name : str
name of the station
quality : str
"high", "medium", "low"
wban_ids : list of str
list of WBAN IDs, or "99999" which have been used to identify the station.
recent_wban_id = None
WBAN ID most recently used to identify the station.
climate_zones = {}
dict of all climate zones.
"""
def __init__(self, usaf_id, load_metadata=True):
"""Initialize the station, optionally loading its metadata.
When ``load_metadata`` is False the USAF id is validated explicitly
and every metadata attribute is initialized empty (None / {}); when
True, attributes are filled by ``_load_metadata`` instead (which
presumably performs its own id validation -- TODO confirm).
"""
self.usaf_id = usaf_id
if load_metadata:
self._load_metadata()
else:
# Metadata skipped: validate the id and leave all metadata empty.
valid_usaf_id_or_raise(usaf_id)
self.iecc_climate_zone = None
self.iecc_moisture_regime = None
self.ba_climate_zone = None
self.ca_climate_zone = None
self.elevation = None
self.latitude = None
self.longitude = None
self.coords = None
self.name = None
self.quality = None
self.wban_ids = None
self.recent_wban_id = None
self.climate_zones = {}
def __str__(self):
"""Display the station as its USAF identifier."""
return self.usaf_id
def __repr__(self):
"""Unambiguous developer representation, e.g. ``ISDStation('722880')``."""
return "ISDStation('{}')".format(self.usaf_id)
def _load_metadata(self):
"""Populate station attributes from the ISD station metadata record."""
metadata = get_isd_station_metadata(self.usaf_id)
def _float_or_none(field):
# Convert an optional metadata field to float, preserving None.
value = metadata.get(field)
return None if value is None else float(value)
self.iecc_climate_zone = metadata.get("iecc_climate_zone")
self.iecc_moisture_regime = metadata.get("iecc_moisture_regime")
self.ba_climate_zone = metadata.get("ba_climate_zone")
self.ca_climate_zone = metadata.get("ca_climate_zone")
self.icao_code = metadata.get("icao_code")
self.elevation = _float_or_none("elevation") # meters
self.latitude = _float_or_none("latitude")
self.longitude = _float_or_none("longitude")
self.coords = (self.latitude, self.longitude)
self.name = metadata.get("name")
self.quality = metadata.get("quality")
# NOTE(review): if "wban_ids" is present but None this raises
# AttributeError on .split -- confirm the metadata source never
# stores an explicit null here.
self.wban_ids = metadata.get("wban_ids", "").split(",")
self.recent_wban_id = metadata.get("recent_wban_id")
self.climate_zones = {
"iecc_climate_zone": metadata.get("iecc_climate_zone"),
"iecc_moisture_regime": metadata.get("iecc_moisture_regime"),
"ba_climate_zone": metadata.get("ba_climate_zone"),
"ca_climate_zone": metadata.get("ca_climate_zone"),
}
def json(self):
""" Return a JSON-serializeable object containing station metadata."""
return {
"elevation": self.elevation,
"latitude": self.latitude,
"longitude": self.longitude,
"icao_code": self.icao_code,
"name": self.name,
"quality": self.quality,
"wban_ids": self.wban_ids,
"recent_wban_id": self.recent_wban_id,
"climate_zones": {
"iecc_climate_zone": self.iecc_climate_zone,
"iecc_moisture_regime": self.iecc_moisture_regime,
"ba_climate_zone": self.ba_climate_zone,
"ca_climate_zone": self.ca_climate_zone,
},
}
def get_isd_filenames(self, year=None, with_host=False):
""" Get filenames of raw ISD station data. """
return get_isd_filenames(self.usaf_id, year, with_host=with_host)
def get_gsod_filenames(self, year=None, with_host=False):
""" Get filenames of raw GSOD station data. """
return get_gsod_filenames(self.usaf_id, year, with_host=with_host)
def get_isd_file_metadata(self):
""" Get raw file metadata for the station. """
return get_isd_file_metadata(self.usaf_id)
# fetch raw data
def fetch_isd_raw_temp_data(self, year):
""" Pull raw ISD data for the given year directly from FTP. """
return fetch_isd_raw_temp_data(self.usaf_id, year)
def fetch_gsod_raw_temp_data(self, year):
""" Pull raw GSOD data for the given year directly from FTP. """
return fetch_gsod_raw_temp_data(self.usaf_id, year)
# fetch raw data then frequency-normalize
def fetch_isd_hourly_temp_data(self, year):
""" Pull raw ISD temperature data for the given year directly from FTP and resample to hourly time series. """
return fetch_isd_hourly_temp_data(self.usaf_id, year)
def fetch_isd_daily_temp_data(self, year):
""" Pull raw ISD temperature data for the given year directly from FTP and resample to daily time series. """
return fetch_isd_daily_temp_data(self.usaf_id, year)
def fetch_gsod_daily_temp_data(self, year):
""" Pull raw GSOD temperature data for the given year directly from FTP and resample to daily time series. """
return fetch_gsod_daily_temp_data(self.usaf_id, year)
def fetch_tmy3_hourly_temp_data(self):
""" Pull hourly TMY3 temperature hourly time series directly from NREL. """
return fetch_tmy3_hourly_temp_data(self.usaf_id)
def fetch_cz2010_hourly_temp_data(self):
""" Pull hourly CZ2010 temperature hourly time series from URL. """
return fetch_cz2010_hourly_temp_data(self.usaf_id)
# get key-value store key
def get_isd_hourly_temp_data_cache_key(self, year):
""" Get key used to cache resampled hourly ISD temperature data for the given year. """
return get_isd_hourly_temp_data_cache_key(self.usaf_id, year)
def get_isd_daily_temp_data_cache_key(self, year):
""" Get key used to cache resampled daily ISD temperature data for the given year. """
return get_isd_daily_temp_data_cache_key(self.usaf_id, year)
def get_gsod_daily_temp_data_cache_key(self, year):
""" Get key used to cache resampled daily GSOD temperature data for the given year. """
return get_gsod_daily_temp_data_cache_key(self.usaf_id, year)
def get_tmy3_hourly_temp_data_cache_key(self):
""" Get key used to cache TMY3 weather-normalized temperature data. """
return get_tmy3_hourly_temp_data_cache_key(self.usaf_id)
def get_cz2010_hourly_temp_data_cache_key(self):
""" Get key used to cache CZ2010 weather-normalized temperature data. """
return get_cz2010_hourly_temp_data_cache_key(self.usaf_id)
# is cached data expired? boolean. true if expired or not in cache
def cached_isd_hourly_temp_data_is_expired(self, year):
""" Return True if cache of resampled hourly ISD temperature data has expired or does not exist for the given year. """
return cached_isd_hourly_temp_data_is_expired(self.usaf_id, year)
def cached_isd_daily_temp_data_is_expired(self, year):
""" Return True if cache of resampled daily ISD temperature data has expired or does not exist for the given year. """
return cached_isd_daily_temp_data_is_expired(self.usaf_id, year)
def cached_gsod_daily_temp_data_is_expired(self, year):
""" Return True if cache of resampled daily GSOD temperature data has expired or does not exist for the given year. """
return cached_gsod_daily_temp_data_is_expired(self.usaf_id, year)
# check if data is available and delete data in the cache if it's expired
def validate_isd_hourly_temp_data_cache(self, year):
""" Delete cached resampled hourly ISD temperature data if it has expired for the given year. """
return validate_isd_hourly_temp_data_cache(self.usaf_id, year)
def validate_isd_daily_temp_data_cache(self, year):
""" Delete cached resampled daily ISD temperature data if it has expired for the given year. """
return validate_isd_daily_temp_data_cache(self.usaf_id, year)
def validate_gsod_daily_temp_data_cache(self, year):
""" Delete cached resampled daily GSOD temperature data if it has expired for the given year. """
return validate_gsod_daily_temp_data_cache(self.usaf_id, year)
def validate_tmy3_hourly_temp_data_cache(self):
""" Check if TMY3 data exists in cache. """
return validate_tmy3_hourly_temp_data_cache(self.usaf_id)
def validate_cz2010_hourly_temp_data_cache(self):
""" Check if CZ2010 data exists in cache. """
return validate_cz2010_hourly_temp_data_cache(self.usaf_id)
# pandas time series to json
def serialize_isd_hourly_temp_data(self, ts):
""" Serialize resampled hourly ISD pandas time series as JSON for caching. """
return serialize_isd_hourly_temp_data(ts)
def serialize_isd_daily_temp_data(self, ts):
""" Serialize resampled daily ISD pandas time series as JSON for caching. """
return serialize_isd_daily_temp_data(ts)
def serialize_gsod_daily_temp_data(self, ts):
""" Serialize resampled daily GSOD pandas time series as JSON for caching. """
return serialize_gsod_daily_temp_data(ts)
def serialize_tmy3_hourly_temp_data(self, ts):
""" Serialize hourly TMY3 pandas time series as JSON for caching. """
return serialize_tmy3_hourly_temp_data(ts)
def serialize_cz2010_hourly_temp_data(self, ts):
""" Serialize hourly CZ2010 pandas time series as JSON for caching. """
return serialize_cz2010_hourly_temp_data(ts)
# json to pandas time series
def deserialize_isd_hourly_temp_data(self, data):
""" Deserialize JSON representation of resampled hourly ISD into pandas time series. """
return deserialize_isd_hourly_temp_data(data)
def deserialize_isd_daily_temp_data(self, data):
""" Deserialize JSON representation of resampled daily ISD into pandas time series. """
return deserialize_isd_daily_temp_data(data)
def deserialize_gsod_daily_temp_data(self, data):
""" Deserialize JSON representation of resampled daily GSOD into pandas time series. """
return deserialize_gsod_daily_temp_data(data)
def deserialize_tmy3_hourly_temp_data(self, data):
""" Deserialize JSON representation of hourly TMY3 into pandas time series. """
return deserialize_isd_hourly_temp_data(data)
def deserialize_cz2010_hourly_temp_data(self, data):
""" Deserialize JSON representation of hourly CZ2010 into pandas time series. """
return deserialize_cz2010_hourly_temp_data(data)
# return pandas time series of data from cache
def read_isd_hourly_temp_data_from_cache(self, year):
""" Get cached version of resampled hourly ISD temperature data for given year. """
return read_isd_hourly_temp_data_from_cache(self.usaf_id, year)
def read_isd_daily_temp_data_from_cache(self, year):
""" Get cached version of resampled daily ISD temperature data for given year. """
return read_isd_daily_temp_data_from_cache(self.usaf_id, year)
def read_gsod_daily_temp_data_from_cache(self, year):
""" Get cached version of resampled daily GSOD temperature data for given year. """
return read_gsod_daily_temp_data_from_cache(self.usaf_id, year)
def read_tmy3_hourly_temp_data_from_cache(self):
""" Get cached version of hourly TMY3 temperature data. """
return read_tmy3_hourly_temp_data_from_cache(self.usaf_id)
def read_cz2010_hourly_temp_data_from_cache(self):
""" Get cached version of hourly CZ2010 temperature data. """
return read_cz2010_hourly_temp_data_from_cache(self.usaf_id)
# write pandas time series of data to cache for a particular year
def write_isd_hourly_temp_data_to_cache(self, year, ts):
""" Write resampled hourly ISD temperature data to cache for given year. """
return write_isd_hourly_temp_data_to_cache(self.usaf_id, year, ts)
def write_isd_daily_temp_data_to_cache(self, year, ts):
""" Write resampled daily ISD temperature data to cache for given year. """
return write_isd_daily_temp_data_to_cache(self.usaf_id, year, ts)
def write_gsod_daily_temp_data_to_cache(self, year, ts):
""" Write resampled daily GSOD temperature data to cache for given year. """
return write_gsod_daily_temp_data_to_cache(self.usaf_id, year, ts)
def write_tmy3_hourly_temp_data_to_cache(self, ts):
""" Write hourly TMY3 temperature data to cache for given year. """
return write_tmy3_hourly_temp_data_to_cache(self.usaf_id, ts)
def write_cz2010_hourly_temp_data_to_cache(self, ts):
""" Write hourly CZ2010 temperature data to cache for given year. """
return write_cz2010_hourly_temp_data_to_cache(self.usaf_id, ts)
# delete cached data for a particular year
def destroy_cached_isd_hourly_temp_data(self, year):
""" Remove cached resampled hourly ISD temperature data to cache for given year. """
return destroy_cached_isd_hourly_temp_data(self.usaf_id, year)
def destroy_cached_isd_daily_temp_data(self, year):
""" Remove cached resampled daily ISD temperature data to cache for given year. """
return destroy_cached_isd_daily_temp_data(self.usaf_id, year)
def destroy_cached_gsod_daily_temp_data(self, year):
""" Remove cached resampled daily GSOD temperature data to cache for given year. """
return destroy_cached_gsod_daily_temp_data(self.usaf_id, year)
def destroy_cached_tmy3_hourly_temp_data(self):
""" Remove cached hourly TMY3 temperature data to cache. """
return destroy_cached_tmy3_hourly_temp_data(self.usaf_id)
def destroy_cached_cz2010_hourly_temp_data(self):
""" Remove cached hourly CZ2010 temperature data to cache. """
return destroy_cached_cz2010_hourly_temp_data(self.usaf_id)
# load data either from cache if valid or directly from source
def load_isd_hourly_temp_data_cached_proxy(self, year):
""" Load resampled hourly ISD temperature data from cache, or if it is expired or hadn't been cached, fetch from FTP for given year. """
return load_isd_hourly_temp_data_cached_proxy(self.usaf_id, year)
def load_isd_daily_temp_data_cached_proxy(self, year):
""" Load resampled daily ISD temperature data from cache, or if it is expired or hadn't been cached, fetch from FTP for given year. """
return load_isd_daily_temp_data_cached_proxy(self.usaf_id, year)
def load_gsod_daily_temp_data_cached_proxy(self, year):
""" Load resampled daily GSOD temperature data from cache, or if it is expired or hadn't been cached, fetch from FTP for given year. """
return load_gsod_daily_temp_data_cached_proxy(self.usaf_id, year)
def load_tmy3_hourly_temp_data_cached_proxy(self):
""" Load hourly TMY3 temperature data from cache, or if it is expired or hadn't been cached, fetch from NREL. """
return load_tmy3_hourly_temp_data_cached_proxy(self.usaf_id)
def load_cz2010_hourly_temp_data_cached_proxy(self):
""" Load hourly CZ2010 temperature data from cache, or if it is expired or hadn't been cached, fetch from URL. """
return load_cz2010_hourly_temp_data_cached_proxy(self.usaf_id)
# main interface: load data from start date to end date
def load_isd_hourly_temp_data(
self,
start,
end,
read_from_cache=True,
write_to_cache=True,
error_on_missing_years=True,
):
""" Load resampled hourly ISD temperature data from start date to end date (inclusive).
This is the primary convenience method for loading resampled hourly ISD temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
"""
return load_isd_hourly_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
error_on_missing_years=error_on_missing_years,
)
def load_isd_daily_temp_data(
self, start, end, read_from_cache=True, write_to_cache=True
):
""" Load resampled daily ISD temperature data from start date to end date (inclusive).
This is the primary convenience method for loading resampled daily ISD temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
"""
return load_isd_daily_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
)
def load_gsod_daily_temp_data(
self, start, end, read_from_cache=True, write_to_cache=True
):
""" Load resampled daily GSOD temperature data from start date to end date (inclusive).
This is the primary convenience method for loading resampled daily GSOD temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
"""
return load_gsod_daily_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
)
def load_tmy3_hourly_temp_data(
self, start, end, read_from_cache=True, write_to_cache=True
):
""" Load hourly TMY3 temperature data from start date to end date (inclusive).
This is the primary convenience method for loading hourly TMY3 temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
"""
return load_tmy3_hourly_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
)
# load all cached data for this station
def load_cached_isd_hourly_temp_data(self):
""" Load all cached resampled hourly ISD temperature data. """
return load_cached_isd_hourly_temp_data(self.usaf_id)
def load_cached_isd_daily_temp_data(self):
""" Load all cached resampled daily ISD temperature data. """
return load_cached_isd_daily_temp_data(self.usaf_id)
def load_cached_gsod_daily_temp_data(self):
""" Load all cached resampled daily GSOD temperature data. """
return load_cached_gsod_daily_temp_data(self.usaf_id)
def load_cached_tmy3_hourly_temp_data(self):
""" Load all cached hourly TMY3 temperature data (the year is set to 1900). """
return load_cached_tmy3_hourly_temp_data(self.usaf_id)
def load_cached_cz2010_hourly_temp_data(self):
""" Load all cached hourly CZ2010 temperature data (the year is set to 1900). """
return load_cached_cz2010_hourly_temp_data(self.usaf_id)
|
openeemeter/eeweather | eeweather/visualization.py | plot_station_mapping | python | def plot_station_mapping(
target_latitude,
target_longitude,
isd_station,
distance_meters,
target_label="target",
): # pragma: no cover
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("Plotting requires matplotlib.")
try:
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import cartopy.io.img_tiles as cimgt
except ImportError:
raise ImportError("Plotting requires cartopy.")
lat, lng = isd_station.coords
t_lat, t_lng = float(target_latitude), float(target_longitude)
# fiture
fig = plt.figure(figsize=(16, 8))
# axes
tiles = cimgt.StamenTerrain()
ax = plt.subplot(1, 1, 1, projection=tiles.crs)
# offsets for labels
x_max = max([lng, t_lng])
x_min = min([lng, t_lng])
x_diff = x_max - x_min
y_max = max([lat, t_lat])
y_min = min([lat, t_lat])
y_diff = y_max - y_min
xoffset = x_diff * 0.05
yoffset = y_diff * 0.05
# minimum
left = x_min - x_diff * 0.5
right = x_max + x_diff * 0.5
bottom = y_min - y_diff * 0.3
top = y_max + y_diff * 0.3
width_ratio = 2.
height_ratio = 1.
if (right - left) / (top - bottom) > width_ratio / height_ratio:
# too short
goal = (right - left) * height_ratio / width_ratio
diff = goal - (top - bottom)
bottom = bottom - diff / 2.
top = top + diff / 2.
else:
# too skinny
goal = (top - bottom) * width_ratio / height_ratio
diff = goal - (right - left)
left = left - diff / 2.
right = right + diff / 2.
ax.set_extent([left, right, bottom, top])
# determine zoom level
# tile size at level 1 = 64 km
# level 2 = 32 km, level 3 = 16 km, etc, i.e. 128/(2^n) km
N_TILES = 600 # (how many tiles approximately fit in distance)
km = distance_meters / 1000.0
zoom_level = int(np.log2(128 * N_TILES / km))
ax.add_image(tiles, zoom_level)
# line between
plt.plot(
[lng, t_lng],
[lat, t_lat],
linestyle="-",
dashes=[2, 2],
transform=ccrs.Geodetic(),
)
# station
ax.plot(lng, lat, "ko", markersize=7, transform=ccrs.Geodetic())
# target
ax.plot(t_lng, t_lat, "ro", markersize=7, transform=ccrs.Geodetic())
# station label
station_label = "{} ({})".format(isd_station.usaf_id, isd_station.name)
ax.text(lng + xoffset, lat + yoffset, station_label, transform=ccrs.Geodetic())
# target label
ax.text(t_lng + xoffset, t_lat + yoffset, target_label, transform=ccrs.Geodetic())
# distance labels
mid_lng = (lng + t_lng) / 2
mid_lat = (lat + t_lat) / 2
dist_text = "{:.01f} km".format(km)
ax.text(mid_lng + xoffset, mid_lat + yoffset, dist_text, transform=ccrs.Geodetic())
plt.show() | Plots this mapping on a map. | train | https://github.com/openeemeter/eeweather/blob/d32b7369b26edfa3ee431c60457afeb0593123a7/eeweather/visualization.py#L29-L132 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2018 Open Energy Efficiency, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from .connections import metadata_db_connection_proxy
from .exceptions import UnrecognizedUSAFIDError
from .stations import ISDStation
__all__ = ("plot_station_mapping", "plot_station_mappings")
def plot_station_mappings(mapping_results): # pragma: no cover
""" Plot a list of mapping results on a map.
Requires matplotlib and cartopy.
Parameters
----------
mapping_results : list of MappingResult objects
Mapping results to plot
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("Plotting requires matplotlib.")
try:
import cartopy.crs as ccrs
import cartopy.feature as cfeature
except ImportError:
raise ImportError("Plotting requires cartopy.")
lats = []
lngs = []
t_lats = []
t_lngs = []
n_discards = 0
for mapping_result in mapping_results:
if not mapping_result.is_empty():
lat, lng = mapping_result.isd_station.coords
t_lat, t_lng = map(float, mapping_result.target_coords)
lats.append(lat)
lngs.append(lng)
t_lats.append(t_lat)
t_lngs.append(t_lng)
else:
n_discards += 1
print("Discarded {} empty mappings".format(n_discards))
# figure
fig = plt.figure(figsize=(60, 60))
# axes
ax = plt.subplot(1, 1, 1, projection=ccrs.Mercator())
# offsets for labels
all_lngs = lngs + t_lngs
all_lats = lats + t_lats
x_max = max(all_lngs) # lists
x_min = min(all_lngs)
x_diff = x_max - x_min
y_max = max(all_lats)
y_min = min(all_lats)
y_diff = y_max - y_min
# minimum
x_pad = 0.1 * x_diff
y_pad = 0.1 * y_diff
left = x_min - x_pad
right = x_max + x_pad
bottom = y_min - y_pad
top = y_max + y_pad
width_ratio = 2.
height_ratio = 1.
if (right - left) / (top - bottom) > height_ratio / width_ratio:
# too short
goal = (right - left) * height_ratio / width_ratio
diff = goal - (top - bottom)
bottom = bottom - diff / 2.
top = top + diff / 2.
else:
# too skinny
goal = (top - bottom) * width_ratio / height_ratio
diff = goal - (right - left)
left = left - diff / 2.
right = right + diff / 2.
left = max(left, -179.9)
right = min(right, 179.9)
bottom = max([bottom, -89.9])
top = min([top, 89.9])
ax.set_extent([left, right, bottom, top])
# OCEAN
ax.add_feature(
cfeature.NaturalEarthFeature(
"physical",
"ocean",
"50m",
edgecolor="face",
facecolor=cfeature.COLORS["water"],
)
)
# LAND
ax.add_feature(
cfeature.NaturalEarthFeature(
"physical",
"land",
"50m",
edgecolor="face",
facecolor=cfeature.COLORS["land"],
)
)
# BORDERS
ax.add_feature(
cfeature.NaturalEarthFeature(
"cultural",
"admin_0_boundary_lines_land",
"50m",
edgecolor="black",
facecolor="none",
)
)
# LAKES
ax.add_feature(
cfeature.NaturalEarthFeature(
"physical",
"lakes",
"50m",
edgecolor="face",
facecolor=cfeature.COLORS["water"],
)
)
# COASTLINE
ax.add_feature(
cfeature.NaturalEarthFeature(
"physical", "coastline", "50m", edgecolor="black", facecolor="none"
)
)
# lines between
# for lat, t_lat, lng, t_lng in zip(lats, t_lats, lngs, t_lngs):
ax.plot(
[lngs, t_lngs],
[lats, t_lats],
color="k",
linestyle="-",
transform=ccrs.Geodetic(),
linewidth=0.3,
)
# stations
ax.plot(lngs, lats, "bo", markersize=1, transform=ccrs.Geodetic())
plt.title("Location to weather station mapping")
plt.show()
|
openeemeter/eeweather | eeweather/visualization.py | plot_station_mappings | python | def plot_station_mappings(mapping_results): # pragma: no cover
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("Plotting requires matplotlib.")
try:
import cartopy.crs as ccrs
import cartopy.feature as cfeature
except ImportError:
raise ImportError("Plotting requires cartopy.")
lats = []
lngs = []
t_lats = []
t_lngs = []
n_discards = 0
for mapping_result in mapping_results:
if not mapping_result.is_empty():
lat, lng = mapping_result.isd_station.coords
t_lat, t_lng = map(float, mapping_result.target_coords)
lats.append(lat)
lngs.append(lng)
t_lats.append(t_lat)
t_lngs.append(t_lng)
else:
n_discards += 1
print("Discarded {} empty mappings".format(n_discards))
# figure
fig = plt.figure(figsize=(60, 60))
# axes
ax = plt.subplot(1, 1, 1, projection=ccrs.Mercator())
# offsets for labels
all_lngs = lngs + t_lngs
all_lats = lats + t_lats
x_max = max(all_lngs) # lists
x_min = min(all_lngs)
x_diff = x_max - x_min
y_max = max(all_lats)
y_min = min(all_lats)
y_diff = y_max - y_min
# minimum
x_pad = 0.1 * x_diff
y_pad = 0.1 * y_diff
left = x_min - x_pad
right = x_max + x_pad
bottom = y_min - y_pad
top = y_max + y_pad
width_ratio = 2.
height_ratio = 1.
if (right - left) / (top - bottom) > height_ratio / width_ratio:
# too short
goal = (right - left) * height_ratio / width_ratio
diff = goal - (top - bottom)
bottom = bottom - diff / 2.
top = top + diff / 2.
else:
# too skinny
goal = (top - bottom) * width_ratio / height_ratio
diff = goal - (right - left)
left = left - diff / 2.
right = right + diff / 2.
left = max(left, -179.9)
right = min(right, 179.9)
bottom = max([bottom, -89.9])
top = min([top, 89.9])
ax.set_extent([left, right, bottom, top])
# OCEAN
ax.add_feature(
cfeature.NaturalEarthFeature(
"physical",
"ocean",
"50m",
edgecolor="face",
facecolor=cfeature.COLORS["water"],
)
)
# LAND
ax.add_feature(
cfeature.NaturalEarthFeature(
"physical",
"land",
"50m",
edgecolor="face",
facecolor=cfeature.COLORS["land"],
)
)
# BORDERS
ax.add_feature(
cfeature.NaturalEarthFeature(
"cultural",
"admin_0_boundary_lines_land",
"50m",
edgecolor="black",
facecolor="none",
)
)
# LAKES
ax.add_feature(
cfeature.NaturalEarthFeature(
"physical",
"lakes",
"50m",
edgecolor="face",
facecolor=cfeature.COLORS["water"],
)
)
# COASTLINE
ax.add_feature(
cfeature.NaturalEarthFeature(
"physical", "coastline", "50m", edgecolor="black", facecolor="none"
)
)
# lines between
# for lat, t_lat, lng, t_lng in zip(lats, t_lats, lngs, t_lngs):
ax.plot(
[lngs, t_lngs],
[lats, t_lats],
color="k",
linestyle="-",
transform=ccrs.Geodetic(),
linewidth=0.3,
)
# stations
ax.plot(lngs, lats, "bo", markersize=1, transform=ccrs.Geodetic())
plt.title("Location to weather station mapping")
plt.show() | Plot a list of mapping results on a map.
Requires matplotlib and cartopy.
Parameters
----------
mapping_results : list of MappingResult objects
Mapping results to plot | train | https://github.com/openeemeter/eeweather/blob/d32b7369b26edfa3ee431c60457afeb0593123a7/eeweather/visualization.py#L135-L289 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2018 Open Energy Efficiency, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from .connections import metadata_db_connection_proxy
from .exceptions import UnrecognizedUSAFIDError
from .stations import ISDStation
__all__ = ("plot_station_mapping", "plot_station_mappings")
def plot_station_mapping(
target_latitude,
target_longitude,
isd_station,
distance_meters,
target_label="target",
): # pragma: no cover
""" Plots this mapping on a map."""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("Plotting requires matplotlib.")
try:
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import cartopy.io.img_tiles as cimgt
except ImportError:
raise ImportError("Plotting requires cartopy.")
lat, lng = isd_station.coords
t_lat, t_lng = float(target_latitude), float(target_longitude)
# fiture
fig = plt.figure(figsize=(16, 8))
# axes
tiles = cimgt.StamenTerrain()
ax = plt.subplot(1, 1, 1, projection=tiles.crs)
# offsets for labels
x_max = max([lng, t_lng])
x_min = min([lng, t_lng])
x_diff = x_max - x_min
y_max = max([lat, t_lat])
y_min = min([lat, t_lat])
y_diff = y_max - y_min
xoffset = x_diff * 0.05
yoffset = y_diff * 0.05
# minimum
left = x_min - x_diff * 0.5
right = x_max + x_diff * 0.5
bottom = y_min - y_diff * 0.3
top = y_max + y_diff * 0.3
width_ratio = 2.
height_ratio = 1.
if (right - left) / (top - bottom) > width_ratio / height_ratio:
# too short
goal = (right - left) * height_ratio / width_ratio
diff = goal - (top - bottom)
bottom = bottom - diff / 2.
top = top + diff / 2.
else:
# too skinny
goal = (top - bottom) * width_ratio / height_ratio
diff = goal - (right - left)
left = left - diff / 2.
right = right + diff / 2.
ax.set_extent([left, right, bottom, top])
# determine zoom level
# tile size at level 1 = 64 km
# level 2 = 32 km, level 3 = 16 km, etc, i.e. 128/(2^n) km
N_TILES = 600 # (how many tiles approximately fit in distance)
km = distance_meters / 1000.0
zoom_level = int(np.log2(128 * N_TILES / km))
ax.add_image(tiles, zoom_level)
# line between
plt.plot(
[lng, t_lng],
[lat, t_lat],
linestyle="-",
dashes=[2, 2],
transform=ccrs.Geodetic(),
)
# station
ax.plot(lng, lat, "ko", markersize=7, transform=ccrs.Geodetic())
# target
ax.plot(t_lng, t_lat, "ro", markersize=7, transform=ccrs.Geodetic())
# station label
station_label = "{} ({})".format(isd_station.usaf_id, isd_station.name)
ax.text(lng + xoffset, lat + yoffset, station_label, transform=ccrs.Geodetic())
# target label
ax.text(t_lng + xoffset, t_lat + yoffset, target_label, transform=ccrs.Geodetic())
# distance labels
mid_lng = (lng + t_lng) / 2
mid_lat = (lat + t_lat) / 2
dist_text = "{:.01f} km".format(km)
ax.text(mid_lng + xoffset, mid_lat + yoffset, dist_text, transform=ccrs.Geodetic())
plt.show()
|
fabaff/python-mystrom | pymystrom/switch.py | MyStromPlug.set_relay_on | python | def set_relay_on(self):
if not self.get_relay_state():
try:
request = requests.get(
'{}/relay'.format(self.resource), params={'state': '1'},
timeout=self.timeout)
if request.status_code == 200:
self.data['relay'] = True
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError() | Turn the relay on. | train | https://github.com/fabaff/python-mystrom/blob/86410f8952104651ef76ad37c84c29740c50551e/pymystrom/switch.py#L23-L33 | [
"def get_relay_state(self):\n \"\"\"Get the relay state.\"\"\"\n self.get_status()\n try:\n self.state = self.data['relay']\n except TypeError:\n self.state = False\n\n return bool(self.state)\n"
] | class MyStromPlug(object):
"""A class for a myStrom switch."""
def __init__(self, host):
"""Initialize the switch."""
self.resource = 'http://{}'.format(host)
self.timeout = 5
self.data = None
self.state = None
self.consumption = 0
self.temperature = 0
def set_relay_off(self):
"""Turn the relay off."""
if self.get_relay_state():
try:
request = requests.get(
'{}/relay'.format(self.resource), params={'state': '0'},
timeout=self.timeout)
if request.status_code == 200:
self.data['relay'] = False
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def get_status(self):
"""Get the details from the switch."""
try:
request = requests.get(
'{}/report'.format(self.resource), timeout=self.timeout)
self.data = request.json()
return self.data
except (requests.exceptions.ConnectionError, ValueError):
raise exceptions.MyStromConnectionError()
def get_relay_state(self):
"""Get the relay state."""
self.get_status()
try:
self.state = self.data['relay']
except TypeError:
self.state = False
return bool(self.state)
def get_consumption(self):
"""Get current power consumption in mWh."""
self.get_status()
try:
self.consumption = self.data['power']
except TypeError:
self.consumption = 0
return self.consumption
def get_temperature(self):
"""Get current temperature in celsius."""
try:
request = requests.get(
'{}/temp'.format(self.resource), timeout=self.timeout, allow_redirects=False)
self.temperature = request.json()['compensated']
return self.temperature
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
except ValueError:
raise exceptions.MyStromNotVersionTwoSwitch()
|
fabaff/python-mystrom | pymystrom/switch.py | MyStromPlug.set_relay_off | python | def set_relay_off(self):
if self.get_relay_state():
try:
request = requests.get(
'{}/relay'.format(self.resource), params={'state': '0'},
timeout=self.timeout)
if request.status_code == 200:
self.data['relay'] = False
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError() | Turn the relay off. | train | https://github.com/fabaff/python-mystrom/blob/86410f8952104651ef76ad37c84c29740c50551e/pymystrom/switch.py#L35-L45 | [
"def get_relay_state(self):\n \"\"\"Get the relay state.\"\"\"\n self.get_status()\n try:\n self.state = self.data['relay']\n except TypeError:\n self.state = False\n\n return bool(self.state)\n"
] | class MyStromPlug(object):
"""A class for a myStrom switch."""
def __init__(self, host):
"""Initialize the switch."""
self.resource = 'http://{}'.format(host)
self.timeout = 5
self.data = None
self.state = None
self.consumption = 0
self.temperature = 0
def set_relay_on(self):
"""Turn the relay on."""
if not self.get_relay_state():
try:
request = requests.get(
'{}/relay'.format(self.resource), params={'state': '1'},
timeout=self.timeout)
if request.status_code == 200:
self.data['relay'] = True
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def get_status(self):
"""Get the details from the switch."""
try:
request = requests.get(
'{}/report'.format(self.resource), timeout=self.timeout)
self.data = request.json()
return self.data
except (requests.exceptions.ConnectionError, ValueError):
raise exceptions.MyStromConnectionError()
def get_relay_state(self):
"""Get the relay state."""
self.get_status()
try:
self.state = self.data['relay']
except TypeError:
self.state = False
return bool(self.state)
def get_consumption(self):
"""Get current power consumption in mWh."""
self.get_status()
try:
self.consumption = self.data['power']
except TypeError:
self.consumption = 0
return self.consumption
def get_temperature(self):
"""Get current temperature in celsius."""
try:
request = requests.get(
'{}/temp'.format(self.resource), timeout=self.timeout, allow_redirects=False)
self.temperature = request.json()['compensated']
return self.temperature
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
except ValueError:
raise exceptions.MyStromNotVersionTwoSwitch()
|
fabaff/python-mystrom | pymystrom/switch.py | MyStromPlug.get_relay_state | python | def get_relay_state(self):
self.get_status()
try:
self.state = self.data['relay']
except TypeError:
self.state = False
return bool(self.state) | Get the relay state. | train | https://github.com/fabaff/python-mystrom/blob/86410f8952104651ef76ad37c84c29740c50551e/pymystrom/switch.py#L57-L65 | [
"def get_status(self):\n \"\"\"Get the details from the switch.\"\"\"\n try:\n request = requests.get(\n '{}/report'.format(self.resource), timeout=self.timeout)\n self.data = request.json()\n return self.data\n except (requests.exceptions.ConnectionError, ValueError):\n ... | class MyStromPlug(object):
"""A class for a myStrom switch."""
def __init__(self, host):
"""Initialize the switch."""
self.resource = 'http://{}'.format(host)
self.timeout = 5
self.data = None
self.state = None
self.consumption = 0
self.temperature = 0
def set_relay_on(self):
"""Turn the relay on."""
if not self.get_relay_state():
try:
request = requests.get(
'{}/relay'.format(self.resource), params={'state': '1'},
timeout=self.timeout)
if request.status_code == 200:
self.data['relay'] = True
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_relay_off(self):
"""Turn the relay off."""
if self.get_relay_state():
try:
request = requests.get(
'{}/relay'.format(self.resource), params={'state': '0'},
timeout=self.timeout)
if request.status_code == 200:
self.data['relay'] = False
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def get_status(self):
"""Get the details from the switch."""
try:
request = requests.get(
'{}/report'.format(self.resource), timeout=self.timeout)
self.data = request.json()
return self.data
except (requests.exceptions.ConnectionError, ValueError):
raise exceptions.MyStromConnectionError()
def get_consumption(self):
"""Get current power consumption in mWh."""
self.get_status()
try:
self.consumption = self.data['power']
except TypeError:
self.consumption = 0
return self.consumption
def get_temperature(self):
"""Get current temperature in celsius."""
try:
request = requests.get(
'{}/temp'.format(self.resource), timeout=self.timeout, allow_redirects=False)
self.temperature = request.json()['compensated']
return self.temperature
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
except ValueError:
raise exceptions.MyStromNotVersionTwoSwitch()
|
fabaff/python-mystrom | pymystrom/switch.py | MyStromPlug.get_consumption | python | def get_consumption(self):
self.get_status()
try:
self.consumption = self.data['power']
except TypeError:
self.consumption = 0
return self.consumption | Get current power consumption in mWh. | train | https://github.com/fabaff/python-mystrom/blob/86410f8952104651ef76ad37c84c29740c50551e/pymystrom/switch.py#L67-L75 | [
"def get_status(self):\n \"\"\"Get the details from the switch.\"\"\"\n try:\n request = requests.get(\n '{}/report'.format(self.resource), timeout=self.timeout)\n self.data = request.json()\n return self.data\n except (requests.exceptions.ConnectionError, ValueError):\n ... | class MyStromPlug(object):
"""A class for a myStrom switch."""
def __init__(self, host):
"""Initialize the switch."""
self.resource = 'http://{}'.format(host)
self.timeout = 5
self.data = None
self.state = None
self.consumption = 0
self.temperature = 0
def set_relay_on(self):
"""Turn the relay on."""
if not self.get_relay_state():
try:
request = requests.get(
'{}/relay'.format(self.resource), params={'state': '1'},
timeout=self.timeout)
if request.status_code == 200:
self.data['relay'] = True
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_relay_off(self):
"""Turn the relay off."""
if self.get_relay_state():
try:
request = requests.get(
'{}/relay'.format(self.resource), params={'state': '0'},
timeout=self.timeout)
if request.status_code == 200:
self.data['relay'] = False
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def get_status(self):
"""Get the details from the switch."""
try:
request = requests.get(
'{}/report'.format(self.resource), timeout=self.timeout)
self.data = request.json()
return self.data
except (requests.exceptions.ConnectionError, ValueError):
raise exceptions.MyStromConnectionError()
def get_relay_state(self):
"""Get the relay state."""
self.get_status()
try:
self.state = self.data['relay']
except TypeError:
self.state = False
return bool(self.state)
def get_temperature(self):
"""Get current temperature in celsius."""
try:
request = requests.get(
'{}/temp'.format(self.resource), timeout=self.timeout, allow_redirects=False)
self.temperature = request.json()['compensated']
return self.temperature
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
except ValueError:
raise exceptions.MyStromNotVersionTwoSwitch()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.