docstring | function | __index_level_0__ |
|---|---|---|
Bulk link users by email.
Arguments:
enterprise_customer (EnterpriseCustomer): learners will be linked to this Enterprise Customer instance
manage_learners_form (ManageLearnersForm): bound ManageLearners form instance
request (django.http.request.HttpRequest): HTTP Request instance
email_list (iterable): A list of pre-processed email addresses to handle using the form
|
def _handle_bulk_upload(cls, enterprise_customer, manage_learners_form, request, email_list=None):
errors = []
emails = set()
already_linked_emails = []
duplicate_emails = []
csv_file = manage_learners_form.cleaned_data[ManageLearnersForm.Fields.BULK_UPLOAD]
if email_list:
parsed_csv = [{ManageLearnersForm.CsvColumns.EMAIL: email} for email in email_list]
else:
parsed_csv = parse_csv(csv_file, expected_columns={ManageLearnersForm.CsvColumns.EMAIL})
try:
for index, row in enumerate(parsed_csv):
email = row[ManageLearnersForm.CsvColumns.EMAIL]
try:
already_linked = validate_email_to_link(email, ignore_existing=True)
except ValidationError as exc:
message = _("Error at line {line}: {message}\n").format(line=index + 1, message=exc)
errors.append(message)
else:
if already_linked:
already_linked_emails.append((email, already_linked.enterprise_customer))
elif email in emails:
duplicate_emails.append(email)
else:
emails.add(email)
except ValidationError as exc:
errors.append(exc)
if errors:
manage_learners_form.add_error(
ManageLearnersForm.Fields.GENERAL_ERRORS, ValidationMessages.BULK_LINK_FAILED
)
for error in errors:
manage_learners_form.add_error(ManageLearnersForm.Fields.BULK_UPLOAD, error)
return
# There were no errors. Now do the actual linking:
for email in emails:
EnterpriseCustomerUser.objects.link_user(enterprise_customer, email)
# Report what happened:
count = len(emails)
messages.success(request, ungettext(
"{count} new learner was added to {enterprise_customer_name}.",
"{count} new learners were added to {enterprise_customer_name}.",
count
).format(count=count, enterprise_customer_name=enterprise_customer.name))
this_customer_linked_emails = [
email for email, customer in already_linked_emails if customer == enterprise_customer
]
other_customer_linked_emails = [
email for email, __ in already_linked_emails if email not in this_customer_linked_emails
]
if this_customer_linked_emails:
messages.warning(
request,
_(
"The following learners were already associated with this Enterprise "
"Customer: {list_of_emails}"
).format(
list_of_emails=", ".join(this_customer_linked_emails)
)
)
if other_customer_linked_emails:
messages.warning(
request,
_(
"The following learners are already associated with "
"another Enterprise Customer. These learners were not "
"added to {enterprise_customer_name}: {list_of_emails}"
).format(
enterprise_customer_name=enterprise_customer.name,
list_of_emails=", ".join(other_customer_linked_emails),
)
)
if duplicate_emails:
messages.warning(
request,
_(
"The following duplicate email addresses were not added: "
"{list_of_emails}"
).format(
list_of_emails=", ".join(duplicate_emails)
)
)
# Build a list of all the emails that we can act on further; that is,
# emails that we either linked to this customer, or that were linked already.
all_processable_emails = list(emails) + this_customer_linked_emails
return all_processable_emails
| 434,773
|
Query the enrollment API and determine if a learner is enrolled in a given course run track.
Args:
user: The user whose enrollment needs to be checked
course_id: The course id of the course where enrollment should be checked
course_mode: The mode with which the enrollment should be checked
Returns:
Boolean: Whether or not enrollment exists
|
def is_user_enrolled(cls, user, course_id, course_mode):
enrollment_client = EnrollmentApiClient()
try:
enrollments = enrollment_client.get_course_enrollment(user.username, course_id)
if enrollments and course_mode == enrollments.get('mode'):
return True
except HttpClientError as exc:
logging.error(
'Error while checking enrollment status of user %(user)s: %(message)s',
dict(user=user.username, message=str(exc))
)
except KeyError as exc:
logging.warning(
'Error while parsing enrollment data of user %(user)s: %(message)s',
dict(user=user.username, message=str(exc))
)
return False
| 434,775
|
Accept a list of emails, and separate them into users that exist on OpenEdX and users who don't.
Args:
emails: An iterable of email addresses to split between existing and nonexistent users
Returns:
users: Queryset of users who exist in the OpenEdX platform and who were in the list of email addresses
missing_emails: List of unique emails which were in the original list, but do not yet exist as users
|
def get_users_by_email(cls, emails):
users = User.objects.filter(email__in=emails)
present_emails = users.values_list('email', flat=True)
missing_emails = list(set(emails) - set(present_emails))
return users, missing_emails
| 434,776
|
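The missing-email computation in `get_users_by_email` above is plain set arithmetic; a minimal sketch with literal lists standing in for the queryset values:

```python
emails = ['a@example.com', 'b@example.com', 'c@example.com']
# stand-in for users.values_list('email', flat=True)
present_emails = ['a@example.com']
missing_emails = list(set(emails) - set(present_emails))
print(sorted(missing_emails))  # -> ['b@example.com', 'c@example.com']
```

Note that the set difference also deduplicates, which is what makes the returned missing emails unique as the docstring promises.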
Deduplicate any outgoing message requests, and send the remainder.
Args:
http_request: The HTTP request in whose response we want to embed the messages
message_requests: A list of not-yet-deduplicated messages as (message type, text)
tuples, for example ('error', 'Something went wrong')
|
def send_messages(cls, http_request, message_requests):
deduplicated_messages = set(message_requests)
for msg_type, text in deduplicated_messages:
message_function = getattr(messages, msg_type)
message_function(http_request, text)
| 434,779
|
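Because `send_messages` deduplicates with `set()` on (type, text) tuples, exact duplicates collapse before dispatch; a quick standalone illustration:

```python
message_requests = [
    ('error', 'Something went wrong'),
    ('error', 'Something went wrong'),  # exact duplicate, dispatched only once
    ('success', '3 new learners were added.'),
]
print(len(set(message_requests)))  # -> 2
```

One side effect of this approach is that `set()` also discards ordering, so messages may be emitted in arbitrary order.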
Notify learners about a program in which they've been enrolled.
Args:
enterprise_customer: The EnterpriseCustomer being linked to
program_details: Details about the specific program the learners were enrolled in
users: An iterable of the users or pending users who were enrolled
|
def notify_program_learners(cls, enterprise_customer, program_details, users):
program_name = program_details.get('title')
program_branding = program_details.get('type')
program_uuid = program_details.get('uuid')
lms_root_url = get_configuration_value_for_site(
enterprise_customer.site,
'LMS_ROOT_URL',
settings.LMS_ROOT_URL
)
program_path = urlquote(
'/dashboard/programs/{program_uuid}/?tpa_hint={tpa_hint}'.format(
program_uuid=program_uuid,
tpa_hint=enterprise_customer.identity_provider,
)
)
destination_url = '{site}/{login_or_register}?next={program_path}'.format(
site=lms_root_url,
login_or_register='{login_or_register}',
program_path=program_path
)
program_type = 'program'
program_start = get_earliest_start_date_from_program(program_details)
with mail.get_connection() as email_conn:
for user in users:
login_or_register = 'register' if isinstance(user, PendingEnterpriseCustomerUser) else 'login'
user_url = destination_url.format(login_or_register=login_or_register)
send_email_notification_message(
user=user,
enrolled_in={
'name': program_name,
'url': user_url,
'type': program_type,
'start': program_start,
'branding': program_branding,
},
enterprise_customer=enterprise_customer,
email_connection=email_conn
)
| 434,780
|
Create message for the users who were enrolled in a course or program.
Args:
users: An iterable of users who were successfully enrolled
enrolled_in (str): A string identifier for the course or program the users were enrolled in
Returns:
tuple: A 2-tuple containing a message type and message text
|
def get_success_enrollment_message(cls, users, enrolled_in):
enrolled_count = len(users)
return (
'success',
ungettext(
'{enrolled_count} learner was enrolled in {enrolled_in}.',
'{enrolled_count} learners were enrolled in {enrolled_in}.',
enrolled_count,
).format(
enrolled_count=enrolled_count,
enrolled_in=enrolled_in,
)
)
| 434,781
|
Create message for the users who were not able to be enrolled in a course or program.
Args:
users: An iterable of users who were not successfully enrolled
enrolled_in (str): A string identifier for the course or program with which enrollment was attempted
Returns:
tuple: A 2-tuple containing a message type and message text
|
def get_failed_enrollment_message(cls, users, enrolled_in):
failed_emails = [user.email for user in users]
return (
'error',
_(
'The following learners could not be enrolled in {enrolled_in}: {user_list}'
).format(
enrolled_in=enrolled_in,
user_list=', '.join(failed_emails),
)
)
| 434,782
|
Create message for the pending users who were linked with a pending enrollment in a course or program.
Args:
pending_users: An iterable of PendingEnterpriseCustomerUsers who were successfully linked with a pending enrollment
enrolled_in (str): A string identifier for the course or program the pending users were linked to
Returns:
tuple: A 2-tuple containing a message type and message text
|
def get_pending_enrollment_message(cls, pending_users, enrolled_in):
pending_emails = [pending_user.user_email for pending_user in pending_users]
return (
'warning',
_(
"The following learners do not have an account on "
"{platform_name}. They have not been enrolled in "
"{enrolled_in}. When these learners create an account, they will "
"be enrolled automatically: {pending_email_list}"
).format(
platform_name=settings.PLATFORM_NAME,
enrolled_in=enrolled_in,
pending_email_list=', '.join(pending_emails),
)
)
| 434,783
|
Handle GET request - render linked learners list and "Link learner" form.
Arguments:
request (django.http.request.HttpRequest): Request instance
customer_uuid (str): Enterprise Customer UUID
Returns:
django.http.response.HttpResponse: HttpResponse
|
def get(self, request, customer_uuid):
context = self._build_context(request, customer_uuid)
manage_learners_form = ManageLearnersForm(
user=request.user,
enterprise_customer=context[self.ContextParameters.ENTERPRISE_CUSTOMER]
)
context.update({self.ContextParameters.MANAGE_LEARNERS_FORM: manage_learners_form})
return render(request, self.template, context)
| 434,785
|
Handle POST request - handle form submissions.
Arguments:
request (django.http.request.HttpRequest): Request instance
customer_uuid (str): Enterprise Customer UUID
Returns:
django.http.response.HttpResponse: HttpResponse
|
def post(self, request, customer_uuid):
enterprise_customer = EnterpriseCustomer.objects.get(uuid=customer_uuid) # pylint: disable=no-member
manage_learners_form = ManageLearnersForm(
request.POST,
request.FILES,
user=request.user,
enterprise_customer=enterprise_customer
)
# initial form validation - check that form data is well-formed
if manage_learners_form.is_valid():
email_field_as_bulk_input = split_usernames_and_emails(
manage_learners_form.cleaned_data[ManageLearnersForm.Fields.EMAIL_OR_USERNAME]
)
is_bulk_entry = len(email_field_as_bulk_input) > 1
# The form is valid. Call the appropriate helper depending on the mode:
mode = manage_learners_form.cleaned_data[ManageLearnersForm.Fields.MODE]
if mode == ManageLearnersForm.Modes.MODE_SINGULAR and not is_bulk_entry:
linked_learners = self._handle_singular(enterprise_customer, manage_learners_form)
elif mode == ManageLearnersForm.Modes.MODE_SINGULAR:
linked_learners = self._handle_bulk_upload(
enterprise_customer,
manage_learners_form,
request,
email_list=email_field_as_bulk_input
)
else:
linked_learners = self._handle_bulk_upload(enterprise_customer, manage_learners_form, request)
# _handle_singular or _handle_bulk_upload might have added form errors, so check that the form is still valid
if manage_learners_form.is_valid():
course_details = manage_learners_form.cleaned_data.get(ManageLearnersForm.Fields.COURSE)
program_details = manage_learners_form.cleaned_data.get(ManageLearnersForm.Fields.PROGRAM)
notification_type = manage_learners_form.cleaned_data.get(ManageLearnersForm.Fields.NOTIFY)
notify = notification_type == ManageLearnersForm.NotificationTypes.BY_EMAIL
course_id = None
if course_details:
course_id = course_details['course_id']
if course_id or program_details:
course_mode = manage_learners_form.cleaned_data[ManageLearnersForm.Fields.COURSE_MODE]
self._enroll_users(
request=request,
enterprise_customer=enterprise_customer,
emails=linked_learners,
mode=course_mode,
course_id=course_id,
program_details=program_details,
notify=notify,
)
# Redirect to GET if everything went smooth.
manage_learners_url = reverse("admin:" + UrlNames.MANAGE_LEARNERS, args=(customer_uuid,))
search_keyword = self.get_search_keyword(request)
if search_keyword:
manage_learners_url = manage_learners_url + "?q=" + search_keyword
return HttpResponseRedirect(manage_learners_url)
# if something went wrong - display bound form on the page
context = self._build_context(request, customer_uuid)
context.update({self.ContextParameters.MANAGE_LEARNERS_FORM: manage_learners_form})
return render(request, self.template, context)
| 434,786
|
Handle DELETE request - handle unlinking learner.
Arguments:
request (django.http.request.HttpRequest): Request instance
customer_uuid (str): Enterprise Customer UUID
Returns:
django.http.response.HttpResponse: HttpResponse
|
def delete(self, request, customer_uuid):
# TODO: find a way to satisfy pylint here without suppressing the warning
enterprise_customer = EnterpriseCustomer.objects.get(uuid=customer_uuid) # pylint: disable=no-member
email_to_unlink = request.GET["unlink_email"]
try:
EnterpriseCustomerUser.objects.unlink_user(
enterprise_customer=enterprise_customer, user_email=email_to_unlink
)
except (EnterpriseCustomerUser.DoesNotExist, PendingEnterpriseCustomerUser.DoesNotExist):
message = _("Email {email} is not associated with Enterprise "
"Customer {ec_name}").format(
email=email_to_unlink, ec_name=enterprise_customer.name)
return HttpResponse(message, content_type="application/json", status=404)
return HttpResponse(
json.dumps({}),
content_type="application/json"
)
| 434,787
|
Send xAPI analytics data of the enterprise learners to the given LRS.
Arguments:
lrs_configuration (XAPILRSConfiguration): Configuration object for the LRS
to which the xAPI learner analytics should be sent.
days (int): Only include course completions from this many past days.
|
def send_xapi_statements(self, lrs_configuration, days):
persistent_course_grades = self.get_course_completions(lrs_configuration.enterprise_customer, days)
users = self.prefetch_users(persistent_course_grades)
course_overviews = self.prefetch_courses(persistent_course_grades)
for persistent_course_grade in persistent_course_grades:
try:
user = users.get(persistent_course_grade.user_id)
course_overview = course_overviews.get(persistent_course_grade.course_id)
course_grade = CourseGradeFactory().read(user, course_key=persistent_course_grade.course_id)
send_course_completion_statement(lrs_configuration, user, course_overview, course_grade)
except ClientError:
LOGGER.exception(
'Client error while sending course completion to xAPI for'
' enterprise customer {enterprise_customer}.'.format(
enterprise_customer=lrs_configuration.enterprise_customer.name
)
)
| 434,794
|
Get course completions via PersistentCourseGrade for all the learners of given enterprise customer.
Arguments:
enterprise_customer (EnterpriseCustomer): Include course completions only for learners
of this enterprise customer.
days (int): Only include course completions from this many past days.
Returns:
(list): A list of PersistentCourseGrade objects.
|
def get_course_completions(self, enterprise_customer, days):
return PersistentCourseGrade.objects.filter(
passed_timestamp__gt=datetime.datetime.now() - datetime.timedelta(days=days)
).filter(
user_id__in=enterprise_customer.enterprise_customer_users.values_list('user_id', flat=True)
)
| 434,795
|
Prefetch Users from the list of user_ids present in the persistent_course_grades.
Arguments:
persistent_course_grades (list): A list of PersistentCourseGrade.
Returns:
(dict): A dictionary containing user_id to user mapping.
|
def prefetch_users(persistent_course_grades):
users = User.objects.filter(
id__in=[grade.user_id for grade in persistent_course_grades]
)
return {
user.id: user for user in users
}
| 434,796
|
Get template of catalog admin url.
URL template will contain a placeholder '{catalog_id}' for catalog id.
Arguments:
mode: Admin url mode, either 'change' or 'add' (Default: 'change').
Returns:
A string containing template for catalog url.
Example:
>>> get_catalog_admin_url_template('change')
"http://localhost:18381/admin/catalogs/catalog/{catalog_id}/change/"
|
def get_catalog_admin_url_template(mode='change'):
api_base_url = getattr(settings, "COURSE_CATALOG_API_URL", "")
# Extract FQDN (Fully Qualified Domain Name) from API URL.
match = re.match(r"^(?P<fqdn>(?:https?://)?[^/]+)", api_base_url)
if not match:
return ""
# Return matched FQDN from catalog api url appended with catalog admin path
if mode == 'change':
return match.group("fqdn").rstrip("/") + "/admin/catalogs/catalog/{catalog_id}/change/"
elif mode == 'add':
return match.group("fqdn").rstrip("/") + "/admin/catalogs/catalog/add/"
| 434,801
|
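The FQDN extraction in `get_catalog_admin_url_template` is just the named regex group; a standalone check with a hypothetical API URL:

```python
import re

api_base_url = 'http://localhost:18381/api/v1/'
match = re.match(r"^(?P<fqdn>(?:https?://)?[^/]+)", api_base_url)
print(match.group('fqdn'))  # -> http://localhost:18381
```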
Create HTML and plaintext message bodies for a notification.
We receive a context with data we can use to render, as well as an optional site
template configuration - if we don't get a template configuration, we'll use the
standard, built-in template.
Arguments:
template_context (dict): A set of data to render
template_configuration: A database-backed object with templates
stored that can be used to render a notification.
|
def build_notification_message(template_context, template_configuration=None):
if (
template_configuration is not None and
template_configuration.html_template and
template_configuration.plaintext_template
):
plain_msg, html_msg = template_configuration.render_all_templates(template_context)
else:
plain_msg = render_to_string(
'enterprise/emails/user_notification.txt',
template_context
)
html_msg = render_to_string(
'enterprise/emails/user_notification.html',
template_context
)
return plain_msg, html_msg
| 434,802
|
Return enterprise customer instance for given user.
Users can be associated with an enterprise customer via the `EnterpriseCustomerUser` model:
1. If the given user is associated with any enterprise customer, return that enterprise customer.
2. Otherwise, return `None`.
Arguments:
auth_user (contrib.auth.User): Django User
Returns:
(EnterpriseCustomer): enterprise customer associated with the current user.
|
def get_enterprise_customer_for_user(auth_user):
EnterpriseCustomerUser = apps.get_model('enterprise', 'EnterpriseCustomerUser') # pylint: disable=invalid-name
try:
return EnterpriseCustomerUser.objects.get(user_id=auth_user.id).enterprise_customer # pylint: disable=no-member
except EnterpriseCustomerUser.DoesNotExist:
return None
| 434,806
|
Return the object for EnterpriseCustomerUser.
Arguments:
user_id (str): user identifier
enterprise_uuid (UUID): Universally unique identifier for the enterprise customer.
Returns:
(EnterpriseCustomerUser): enterprise customer user record
|
def get_enterprise_customer_user(user_id, enterprise_uuid):
EnterpriseCustomerUser = apps.get_model('enterprise', 'EnterpriseCustomerUser') # pylint: disable=invalid-name
try:
return EnterpriseCustomerUser.objects.get( # pylint: disable=no-member
enterprise_customer__uuid=enterprise_uuid,
user_id=user_id
)
except EnterpriseCustomerUser.DoesNotExist:
return None
| 434,807
|
Return track selection url for the given course.
Arguments:
course_run (dict): A dictionary containing course run metadata.
query_parameters (dict): A dictionary containing query parameters to be added to course selection url.
Raises:
(KeyError): Raised when the course run dict does not contain a 'key' entry.
Returns:
(str): Course track selection url.
|
def get_course_track_selection_url(course_run, query_parameters):
try:
course_root = reverse('course_modes_choose', kwargs={'course_id': course_run['key']})
except KeyError:
LOGGER.exception(
"KeyError while parsing course run data.\nCourse Run: \n[%s]", course_run,
)
raise
url = '{}{}'.format(
settings.LMS_ROOT_URL,
course_root
)
course_run_url = update_query_parameters(url, query_parameters)
return course_run_url
| 434,808
|
Return url with updated query parameters.
Arguments:
url (str): Original url whose query parameters need to be updated.
query_parameters (dict): A dictionary containing query parameters to be added to the url.
Returns:
(str): The updated url including the given query parameters.
|
def update_query_parameters(url, query_parameters):
scheme, netloc, path, query_string, fragment = urlsplit(url)
url_params = parse_qs(query_string)
# Update url query parameters
url_params.update(query_parameters)
return urlunsplit(
(scheme, netloc, path, urlencode(sorted(url_params.items()), doseq=True), fragment),
)
| 434,809
|
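A minimal usage sketch of `update_query_parameters`, restated here with Python 3's `urllib.parse` imports so the snippet runs standalone (the original presumably imports the same names):

```python
from urllib.parse import urlsplit, urlunsplit, parse_qs, urlencode

def update_query_parameters(url, query_parameters):
    scheme, netloc, path, query_string, fragment = urlsplit(url)
    url_params = parse_qs(query_string)
    url_params.update(query_parameters)
    return urlunsplit(
        (scheme, netloc, path, urlencode(sorted(url_params.items()), doseq=True), fragment))

print(update_query_parameters('https://example.com/course?audit=true',
                              {'tpa_hint': ['saml-default']}))
# -> https://example.com/course?audit=true&tpa_hint=saml-default
```

Sorting the parameters before encoding makes the output deterministic, which keeps generated urls stable across calls.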
Filter audit course modes out if the enterprise customer has not enabled the 'Enable audit enrollment' flag.
Arguments:
enterprise_customer: The EnterpriseCustomer that the enrollment was created using.
course_modes: iterable with dictionaries containing a required 'mode' key
|
def filter_audit_course_modes(enterprise_customer, course_modes):
audit_modes = getattr(settings, 'ENTERPRISE_COURSE_ENROLLMENT_AUDIT_MODES', ['audit'])
if not enterprise_customer.enable_audit_enrollment:
return [course_mode for course_mode in course_modes if course_mode['mode'] not in audit_modes]
return course_modes
| 434,810
|
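The filtering in `filter_audit_course_modes` is a single list comprehension against the configured audit modes; illustrated here with the documented default of `['audit']`:

```python
audit_modes = ['audit']  # default for ENTERPRISE_COURSE_ENROLLMENT_AUDIT_MODES
course_modes = [{'mode': 'audit'}, {'mode': 'verified'}, {'mode': 'professional'}]
print([m for m in course_modes if m['mode'] not in audit_modes])
# -> [{'mode': 'verified'}, {'mode': 'professional'}]
```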
Given an EnterpriseCustomer UUID, return the corresponding EnterpriseCustomer or raise a 404.
Arguments:
enterprise_uuid (str): The UUID (in string form) of the EnterpriseCustomer to fetch.
Returns:
(EnterpriseCustomer): The EnterpriseCustomer given the UUID.
|
def get_enterprise_customer_or_404(enterprise_uuid):
EnterpriseCustomer = apps.get_model('enterprise', 'EnterpriseCustomer') # pylint: disable=invalid-name
try:
enterprise_uuid = UUID(enterprise_uuid)
return EnterpriseCustomer.objects.get(uuid=enterprise_uuid) # pylint: disable=no-member
except (TypeError, ValueError, EnterpriseCustomer.DoesNotExist):
LOGGER.error('Unable to find enterprise customer for UUID: [%s]', enterprise_uuid)
raise Http404
| 434,811
|
Traverse a paginated API response.
Extracts and concatenates "results" (list of dict) returned by DRF-powered
APIs.
Arguments:
response (Dict): Current response dict from service API
endpoint (slumber Resource object): slumber Resource object from edx-rest-api-client
Returns:
list of dict.
|
def traverse_pagination(response, endpoint):
results = response.get('results', [])
next_page = response.get('next')
while next_page:
querystring = parse_qs(urlparse(next_page).query, keep_blank_values=True)
response = endpoint.get(**querystring)
results += response.get('results', [])
next_page = response.get('next')
return results
| 434,813
|
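A sketch of the pagination walk with a stub standing in for the slumber Resource, assuming the `traverse_pagination` above is in scope (it needs `parse_qs` and `urlparse` from `urllib.parse`):

```python
class StubEndpoint:
    """Minimal stand-in for a slumber Resource with a get(**query) method."""
    def __init__(self, pages):
        self._pages = pages

    def get(self, page=('1',), **_):
        # parse_qs yields list values, e.g. {'page': ['2']}
        return self._pages[int(page[0]) - 1]

pages = [
    {'results': [1, 2], 'next': 'https://api.example.com/items/?page=2'},
    {'results': [3], 'next': None},
]
print(traverse_pagination(pages[0], StubEndpoint(pages)))  # -> [1, 2, 3]
```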
Strip all tags from a string except those tags provided in `allowed_tags` parameter.
Args:
text (str): string to strip html tags from
allowed_tags (list): allowed list of html tags
Returns: a string without html tags
|
def strip_html_tags(text, allowed_tags=None):
if text is None:
return
if allowed_tags is None:
allowed_tags = ALLOWED_TAGS
return bleach.clean(text, tags=allowed_tags, attributes=['id', 'class', 'style', 'href', 'title'], strip=True)
| 434,826
|
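A usage sketch for `strip_html_tags`; the expected output assumes bleach's `strip=True` behavior of removing disallowed tags while keeping their inner text:

```python
html = '<div class="x"><b>bold</b> and <i>italic</i></div>'
print(strip_html_tags(html, allowed_tags=['b']))
# -> <b>bold</b> and italic
```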
Initialize :class:`MultipleProgramMatchError`.
Arguments:
programs_matched (int): number of programs matched where one program was expected.
args (iterable): variable arguments
kwargs (dict): keyword arguments
|
def __init__(self, programs_matched, *args, **kwargs):
super(MultipleProgramMatchError, self).__init__(*args, **kwargs)
self.programs_matched = programs_matched
| 434,829
|
Save xAPI statement.
Arguments:
statement (EnterpriseStatement): xAPI Statement to send to the LRS.
Raises:
ClientError: If xAPI statement fails to save.
|
def save_statement(self, statement):
response = self.lrs.save_statement(statement)
if not response:
raise ClientError('EnterpriseXAPIClient request failed.')
| 434,833
|
Use the ``SAPSuccessFactorsAPIClient`` for content metadata transmission to SAPSF.
Arguments:
enterprise_configuration (required): SAPSF configuration connecting an enterprise to an integrated channel.
client: The REST API client that will fetch data from the integrated channel.
|
def __init__(self, enterprise_configuration, client=SAPSuccessFactorsAPIClient):
self.enterprise_configuration = enterprise_configuration
self.client = client(enterprise_configuration) if client else None
| 434,835
|
Return an export csv action.
Arguments:
description (string): action description
fields ([string]): list of model fields to include
header (bool): whether or not to output the column names as the first row
|
def export_as_csv_action(description="Export selected objects as CSV file", fields=None, header=True):
# adapted from https://gist.github.com/mgerring/3645889
def export_as_csv(modeladmin, request, queryset): # pylint: disable=unused-argument
opts = modeladmin.model._meta
if not fields:
field_names = [field.name for field in opts.fields]
else:
field_names = fields
response = HttpResponse(content_type="text/csv")
response["Content-Disposition"] = "attachment; filename={filename}.csv".format(
filename=str(opts).replace(".", "_")
)
writer = unicodecsv.writer(response, encoding="utf-8")
if header:
writer.writerow(field_names)
for obj in queryset:
row = []
for field_name in field_names:
field = getattr(obj, field_name)
if callable(field):
value = field()
else:
value = field
if value is None:
row.append("[Not Set]")
elif not value and isinstance(value, string_types):
row.append("[Empty]")
else:
row.append(value)
writer.writerow(row)
return response
export_as_csv.short_description = description
return export_as_csv
| 434,845
|
Cleanup the pin by closing and unexporting it.
Args:
pin (int, optional): either the pin to clean up or None (default).
If None, clean up all pins.
assert_exists: if True, raise a ValueError if the pin was not
setup. Otherwise, this function is a NOOP.
|
def cleanup(pin=None, assert_exists=False):
if pin is None:
# Take a list of keys because we will be deleting from _open
for pin in list(_open):
cleanup(pin)
return
if not isinstance(pin, int):
raise TypeError("pin must be an int, got: {}".format(pin))
state = _open.get(pin)
if state is None:
if assert_exists:
raise ValueError("pin {} was not setup".format(pin))
return
state.value.close()
state.direction.close()
if os.path.exists(gpiopath(pin)):
log.debug("Unexporting pin {0}".format(pin))
with _export_lock:
with open(pjoin(gpio_root, 'unexport'), 'w') as f:
_write(f, pin)
del _open[pin]
| 434,850
|
Setup pin with mode IN or OUT.
Args:
pin (int):
mode (str): use either gpio.OUT or gpio.IN
pullup (None): rpio compatibility. If anything but None, raises
ValueError.
initial (bool, optional): Initial pin value. Default is False.
|
def setup(pin, mode, pullup=None, initial=False):
if pullup is not None:
raise ValueError("sysfs does not support pullups")
if mode not in (IN, OUT, LOW, HIGH):
raise ValueError(mode)
log.debug("Setup {0}: {1}".format(pin, mode))
f = _open[pin].direction
_write(f, mode)
if mode == OUT:
if initial:
set(pin, 1)
else:
set(pin, 0)
| 434,851
|
Get the library version info.
Args:
no argument
Returns:
4-element tuple with the following components:
-major version number (int)
-minor version number (int)
-complete library version number (int)
-additional information (string)
C library equivalent : Hgetlibversion
|
def getlibversion():
status, major_v, minor_v, release, info = _C.Hgetlibversion()
_checkErr('getlibversion', status, "cannot get lib version")
return major_v, minor_v, release, info
| 435,001
|
Get file version info.
Args:
no argument
Returns:
4-element tuple with the following components:
-major version number (int)
-minor version number (int)
-complete library version number (int)
-additional information (string)
C library equivalent : Hgetfileversion
|
def getfileversion(self):
status, major_v, minor_v, release, info = _C.Hgetfileversion(self._id)
_checkErr('getfileversion', status, "cannot get file version")
return major_v, minor_v, release, info
| 435,003
|
Write a string of data to file by filename and folder.
Args:
folder: Target folder (e.g. c:/ladybug).
fname: File name (e.g. testPts.pts).
data: Any data as string.
mkdir: Set to True to create the directory if doesn't exist (Default: False).
|
def write_to_file_by_name(folder, fname, data, mkdir=False):
if not os.path.isdir(folder):
if mkdir:
preparedir(folder)
else:
created = preparedir(folder, False)
if not created:
raise ValueError("Failed to find %s." % folder)
file_path = os.path.join(folder, fname)
with open(file_path, writemode) as outf:
try:
outf.write(str(data))
return file_path
except Exception as e:
raise IOError("Failed to write %s to file:\n\t%s" % (fname, str(e)))
| 435,175
|
Write a string of data to file.
Args:
file_path: Full path for a valid file path (e.g. c:/ladybug/testPts.pts)
data: Any data as string
mkdir: Set to True to create the directory if doesn't exist (Default: False)
|
def write_to_file(file_path, data, mkdir=False):
folder, fname = os.path.split(file_path)
return write_to_file_by_name(folder, fname, data, mkdir)
| 435,176
|
Download a file to a directory.
Args:
url: A string to a valid URL.
target_folder: Target folder for download (e.g. c:/ladybug)
file_name: File name (e.g. testPts.zip).
mkdir: Set to True to create the directory if doesn't exist (Default: False)
|
def download_file_by_name(url, target_folder, file_name, mkdir=False):
# headers to "spoof" the download as coming from a browser (needed for E+ site)
__hdr__ = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 '
'(KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
'Accept': 'text/html,application/xhtml+xml,'
'application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive'}
# create the target directory.
if not os.path.isdir(target_folder):
if mkdir:
preparedir(target_folder)
else:
created = preparedir(target_folder, False)
if not created:
raise ValueError("Failed to find %s." % target_folder)
file_path = os.path.join(target_folder, file_name)
if (sys.version_info < (3, 0)):
_download_py2(url, file_path, __hdr__)
else:
_download_py3(url, file_path, __hdr__)
| 435,181
|
Download a file from a URL to a local file path.
Args:
url: A string to a valid URL.
file_path: Full path to intended download location (e.g. c:/ladybug/testPts.pts)
mkdir: Set to True to create the directory if doesn't exist (Default: False)
|
def download_file(url, file_path, mkdir=False):
folder, fname = os.path.split(file_path)
return download_file_by_name(url, folder, fname, mkdir)
| 435,182
|
Unzip a compressed file.
Args:
source_file: Full path to a valid compressed file (e.g. c:/ladybug/testPts.zip)
dest_dir: Target folder to extract to (e.g. c:/ladybug).
Default is set to the same directory as the source file.
mkdir: Set to True to create the directory if doesn't exist (Default: False)
|
def unzip_file(source_file, dest_dir=None, mkdir=False):
# set default dest_dir and create it if need be.
if dest_dir is None:
dest_dir, fname = os.path.split(source_file)
elif not os.path.isdir(dest_dir):
if mkdir:
preparedir(dest_dir)
else:
created = preparedir(dest_dir, False)
if not created:
raise ValueError("Failed to find %s." % dest_dir)
# extract files to destination
with zipfile.ZipFile(source_file) as zf:
for member in zf.infolist():
# build the extraction path per member so dest_dir is not mutated across members
extract_dir = dest_dir
words = member.filename.split('\\')
for word in words[:-1]:
drive, word = os.path.splitdrive(word)
head, word = os.path.split(word)
if word in (os.curdir, os.pardir, ''):
continue
extract_dir = os.path.join(extract_dir, word)
zf.extract(member, extract_dir)
| 435,183
|
Load a CSV file into a Python matrix of strings.
Args:
csv_file_path: Full path to a valid CSV file (e.g. c:/ladybug/test.csv)
|
def csv_to_matrix(csv_file_path):
mtx = []
with open(csv_file_path) as csv_data_file:
for row in csv_data_file:
mtx.append(row.split(','))
return mtx
| 435,184
|
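A tiny round trip for `csv_to_matrix` (assuming the function above is in scope), showing one quirk of the naive `split(',')` parsing: each row's last cell keeps its trailing newline:

```python
import os
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.csv', delete=False) as f:
    f.write('1,2\n3,4\n')
print(csv_to_matrix(f.name))  # -> [['1', '2\n'], ['3', '4\n']]
os.remove(f.name)
```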
Load a CSV file consisting only of numbers into a Python matrix of floats.
Args:
csv_file_path: Full path to a valid CSV file (e.g. c:/ladybug/test.csv)
|
def csv_to_num_matrix(csv_file_path):
mtx = []
with open(csv_file_path) as csv_data_file:
for row in csv_data_file:
mtx.append([float(val) for val in row.split(',')])
return mtx
| 435,185
|
Initialize the class.
Args:
file_path: Address to a local .stat file.
|
def __init__(self, file_path):
if file_path is not None:
if not os.path.isfile(file_path):
raise ValueError(
'Cannot find a .stat file at {}'.format(file_path))
if not file_path.lower().endswith('stat'):
raise TypeError('{} is not a .stat file.'.format(file_path))
self._file_path = os.path.normpath(file_path)
# defaults empty state for certain parameters
self._winter_des_day_dict = {}
self._summer_des_day_dict = {}
self._monthly_wind_dirs = []
# import the data from the file
if file_path is not None:
self._import_data()
| 435,186
|
Create a data type from a dictionary.
Args:
data: Data as a dictionary.
{
"name": data type name of the data type as a string
"data_type": the class name of the data type as a string
"base_unit": the base unit of the data type
}
|
def from_json(cls, data):
assert 'name' in data, 'Required keyword "name" is missing!'
assert 'data_type' in data, 'Required keyword "data_type" is missing!'
if cls._type_enumeration is None:
cls._type_enumeration = _DataTypeEnumeration(import_modules=False)
if data['data_type'] == 'GenericType':
assert 'base_unit' in data, \
'Keyword "base_unit" is missing and is required for GenericType.'
return cls._type_enumeration._GENERICTYPE(data['name'], data['base_unit'])
elif data['data_type'] in cls._type_enumeration._TYPES:
clss = cls._type_enumeration._TYPES[data['data_type']]
if data['data_type'] == data['name'].title().replace(' ', ''):
return clss()
else:
instance = clss()
instance._name = data['name']
return instance
else:
raise ValueError(
'Data Type {} could not be recognized'.format(data['data_type']))
| 435,207
|
Check if a certain unit is acceptable for the data type.
Args:
unit: A text string representing the abbreviated unit.
raise_exception: Set to True to raise an exception if not acceptable.
|
def is_unit_acceptable(self, unit, raise_exception=True):
_is_acceptable = unit in self.units
if _is_acceptable or raise_exception is False:
return _is_acceptable
else:
raise ValueError(
'{0} is not an acceptable unit type for {1}. '
'Choose from the following: {2}'.format(
unit, self.__class__.__name__, self.units
)
)
| 435,208
|
Check if a list of values is within physically/mathematically possible range.
Args:
values: A list of values.
unit: The unit of the values. If not specified, the default metric
unit will be assumed.
raise_exception: Set to True to raise an exception if not in range.
|
def is_in_range(self, values, unit=None, raise_exception=True):
self._is_numeric(values)
if unit is None or unit == self.units[0]:
minimum = self.min
maximum = self.max
else:
namespace = {'self': self}
self.is_unit_acceptable(unit, True)
min_statement = "self._{}_to_{}(self.min)".format(
self._clean(self.units[0]), self._clean(unit))
max_statement = "self._{}_to_{}(self.max)".format(
self._clean(self.units[0]), self._clean(unit))
minimum = eval(min_statement, namespace)
maximum = eval(max_statement, namespace)
for value in values:
if value < minimum or value > maximum:
if not raise_exception:
return False
else:
raise ValueError(
'{0} should be between {1} and {2}. Got {3}'.format(
self.__class__.__name__, minimum, maximum, value
)
)
return True
| 435,209
|
Initialize a Ladybug header for lists.
Args:
data_type: A DataType object. (e.g. Temperature)
unit: data_type unit (Default: None)
analysis_period: A Ladybug analysis period (Default: None)
metadata: Optional dictionary of additional metadata,
containing information such as 'source', 'city', or 'zone'.
|
def __init__(self, data_type, unit=None,
analysis_period=None, metadata=None):
assert hasattr(data_type, 'isDataType'), \
'data_type must be a Ladybug DataType. Got {}'.format(type(data_type))
if unit is None:
unit = data_type.units[0]
else:
data_type.is_unit_acceptable(unit)
if analysis_period is not None:
assert hasattr(analysis_period, 'isAnalysisPeriod'), \
'analysis_period must be a Ladybug AnalysisPeriod. Got {}'.format(
type(analysis_period))
if metadata is not None:
assert isinstance(metadata, dict), \
'metadata must be a dictionary. Got {}'.format(type(metadata))
self._data_type = data_type
self._unit = unit
self._analysis_period = analysis_period
self._metadata = metadata or {}
| 435,216
|
Create a header from a dictionary.
Args:
data: {
"data_type": {}, //Type of data (e.g. Temperature)
"unit": string,
"analysis_period": {} // A Ladybug AnalysisPeriod
"metadata": {}, // A dictionary of metadata
}
|
def from_json(cls, data):
# assign default values
assert 'data_type' in data, 'Required keyword "data_type" is missing!'
keys = ('data_type', 'unit', 'analysis_period', 'metadata')
for key in keys:
if key not in data:
data[key] = None
data_type = DataTypeBase.from_json(data['data_type'])
ap = AnalysisPeriod.from_json(data['analysis_period'])
return cls(data_type, data['unit'], ap, data['metadata'])
| 435,217
|
Determine the bins for the DIRINT coefficients.
Args:
ktp : Altitude-independent clearness index
alt : Solar altitude angle
w : precipitable water estimated from surface dew-point temperature
dktp : stability index
Returns:
tuple of ktp_bin, alt_bin, w_bin, dktp_bin
|
def _dirint_bins(ktp, alt, w, dktp):
it = range(len(ktp))
# Create kt_prime bins
ktp_bin = [-1] * len(ktp)
ktp_bin = [0 if ktp[i] >= 0 and ktp[i] < 0.24 else ktp_bin[i] for i in it]
ktp_bin = [1 if ktp[i] >= 0.24 and ktp[i] < 0.4 else ktp_bin[i] for i in it]
ktp_bin = [2 if ktp[i] >= 0.4 and ktp[i] < 0.56 else ktp_bin[i] for i in it]
ktp_bin = [3 if ktp[i] >= 0.56 and ktp[i] < 0.7 else ktp_bin[i] for i in it]
ktp_bin = [4 if ktp[i] >= 0.7 and ktp[i] < 0.8 else ktp_bin[i] for i in it]
ktp_bin = [5 if ktp[i] >= 0.8 and ktp[i] <= 1 else ktp_bin[i] for i in it]
# Create altitude angle bins
alt_bin = [-1] * len(alt)
alt_bin = [0 if alt[i] <= 90 and alt[i] > 65 else alt_bin[i] for i in it]
alt_bin = [1 if alt[i] <= 65 and alt[i] > 50 else alt_bin[i] for i in it]
alt_bin = [2 if alt[i] <= 50 and alt[i] > 35 else alt_bin[i] for i in it]
alt_bin = [3 if alt[i] <= 35 and alt[i] > 20 else alt_bin[i] for i in it]
alt_bin = [4 if alt[i] <= 20 and alt[i] > 10 else alt_bin[i] for i in it]
alt_bin = [5 if alt[i] <= 10 else alt_bin[i] for i in it]
# Create the bins for w based on dew point temperature
w_bin = [-1] * len(w)
w_bin = [0 if w[i] >= 0 and w[i] < 1 else w_bin[i] for i in it]
w_bin = [1 if w[i] >= 1 and w[i] < 2 else w_bin[i] for i in it]
w_bin = [2 if w[i] >= 2 and w[i] < 3 else w_bin[i] for i in it]
w_bin = [3 if w[i] >= 3 else w_bin[i] for i in it]
w_bin = [4 if w[i] == -1 else w_bin[i] for i in it]
# Create delta_kt_prime binning.
dktp_bin = [-1] * len(dktp)
dktp_bin = [0 if dktp[i] >= 0 and dktp[i] < 0.015 else dktp_bin[i] for i in it]
dktp_bin = [1 if dktp[i] >= 0.015 and dktp[i] < 0.035 else dktp_bin[i] for i in it]
dktp_bin = [2 if dktp[i] >= 0.035 and dktp[i] < 0.07 else dktp_bin[i] for i in it]
dktp_bin = [3 if dktp[i] >= 0.07 and dktp[i] < 0.15 else dktp_bin[i] for i in it]
dktp_bin = [4 if dktp[i] >= 0.15 and dktp[i] < 0.3 else dktp_bin[i] for i in it]
dktp_bin = [5 if dktp[i] >= 0.3 and dktp[i] <= 1 else dktp_bin[i] for i in it]
dktp_bin = [6 if dktp[i] == -1 else dktp_bin[i] for i in it]
return ktp_bin, alt_bin, w_bin, dktp_bin
| 435,228
|
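A single-sample sanity check for `_dirint_bins` (assuming the function above is in scope), with inputs chosen to land in mid-range bins of each variable:

```python
print(_dirint_bins(ktp=[0.5], alt=[45.0], w=[1.5], dktp=[0.02]))
# -> ([2], [2], [1], [1])
```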
Calculate Kn for `disc`
Args:
clearness_index : numeric
airmass : numeric
max_airmass : float
airmass > max_airmass is set to max_airmass before being used
in calculating Kn.
Returns:
Kn : numeric
am : numeric
airmass used in the calculation of Kn. am <= max_airmass.
|
def _disc_kn(clearness_index, airmass, max_airmass=12):
# short names for equations
kt = clearness_index
am = airmass
am = min(am, max_airmass) # GH 450
# powers of kt will be used repeatedly, so compute only once
kt2 = kt * kt # about the same as kt ** 2
kt3 = kt2 * kt # 5-10x faster than kt ** 3
if kt <= 0.6:
a = 0.512 - 1.56*kt + 2.286*kt2 - 2.222*kt3
b = 0.37 + 0.962*kt
c = -0.28 + 0.932*kt - 2.048*kt2
else:
a = -5.743 + 21.77*kt - 27.49*kt2 + 11.56*kt3
b = 41.4 - 118.5*kt + 66.05*kt2 + 31.9*kt3
c = -47.01 + 184.2*kt - 222.0*kt2 + 73.81*kt3
delta_kn = a + b * math.exp(c*am)
Knc = 0.866 - 0.122*am + 0.0121*am**2 - 0.000653*am**3 + 1.4e-05*am**4
Kn = Knc - delta_kn
return Kn, am
| 435,230
|
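For reference, the relationships implemented in `_disc_kn` can be written compactly, where $a$, $b$, $c$ are the piecewise polynomials in $k_t$ computed above:

$$K_{nc} = 0.866 - 0.122\,am + 0.0121\,am^{2} - 0.000653\,am^{3} + 1.4\times10^{-5}\,am^{4}$$

$$\Delta K_n = a + b\,e^{c\,am}, \qquad K_n = K_{nc} - \Delta K_n$$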
Create a Data Collection from a dictionary.
Args:
{
"header": A Ladybug Header,
"values": An array of values,
"datetimes": An array of datetimes,
"validated_a_period": Boolean for whether header analysis_period is valid
}
|
def from_json(cls, data):
assert 'header' in data, 'Required keyword "header" is missing!'
assert 'values' in data, 'Required keyword "values" is missing!'
assert 'datetimes' in data, 'Required keyword "datetimes" is missing!'
collection = cls(Header.from_json(data['header']), data['values'],
[DateTime.from_json(dat) for dat in data['datetimes']])
if 'validated_a_period' in data:
collection._validated_a_period = data['validated_a_period']
return collection
| 435,239
|
Filter a Data Collection based on an analysis period.
Args:
analysis_period: A Ladybug analysis period
Return:
A new Data Collection with filtered data
|
def filter_by_analysis_period(self, analysis_period):
self._check_analysis_period(analysis_period)
_filtered_data = self.filter_by_moys(analysis_period.moys)
_filtered_data.header._analysis_period = analysis_period
return _filtered_data
| 435,242
|
Filter the Data Collection based on a list of hours of the year (hoys).
Args:
hoys: A List of hours of the year 0..8759
Return:
A new Data Collection with filtered data
|
def filter_by_hoys(self, hoys):
_moys = tuple(int(hour * 60) for hour in hoys)
return self.filter_by_moys(_moys)
| 435,243
|
Filter the Data Collection based on a list of minutes of the year.
Args:
moys: A List of minutes of the year [0..8759 * 60]
Return:
A new Data Collection with filtered data
|
def filter_by_moys(self, moys):
_filt_values, _filt_datetimes = self._filter_by_moys_slow(moys)
collection = HourlyDiscontinuousCollection(
self.header.duplicate(), _filt_values, _filt_datetimes)
collection._validated_a_period = self._validated_a_period
return collection
| 435,244
|
Initialize an hourly continuous collection.
Args:
header: A Ladybug Header object. Note that this header
must have an AnalysisPeriod on it that aligns with the
list of values.
values: A list of values. Note that the length of this list
must align with the AnalysisPeriod on the header.
|
def __init__(self, header, values):
assert isinstance(header, Header), \
'header must be a Ladybug Header object. Got {}'.format(type(header))
assert isinstance(header.analysis_period, AnalysisPeriod), \
'header of {} must have an analysis_period.'.format(self.__class__.__name__)
assert header.analysis_period.st_hour == 0, \
'analysis_period start hour of {} must be 0. Got {}'.format(
self.__class__.__name__, header.analysis_period.st_hour)
assert header.analysis_period.end_hour == 23, \
'analysis_period end hour of {} must be 23. Got {}'.format(
self.__class__.__name__, header.analysis_period.end_hour)
self._header = header
self.values = values
self._datetimes = None
self._validated_a_period = True
| 435,257
|
Create a Data Collection from a dictionary.
Args:
{
"header": A Ladybug Header,
"values": An array of values,
}
|
def from_json(cls, data):
assert 'header' in data, 'Required keyword "header" is missing!'
assert 'values' in data, 'Required keyword "values" is missing!'
return cls(Header.from_json(data['header']), data['values'])
| 435,258
|
Filter the Data Collection based on a conditional statement.
Args:
statement: A conditional statement as a string (e.g. a > 25 and a%5 == 0).
The variable should always be named as 'a' (without quotations).
Return:
A new Data Collection containing only the filtered data
|
def filter_by_conditional_statement(self, statement):
_filt_values, _filt_datetimes = self._filter_by_statement(statement)
collection = HourlyDiscontinuousCollection(
self.header.duplicate(), _filt_values, _filt_datetimes)
collection._validated_a_period = True
return collection
| 435,261
|
Filter the Data Collection based on a list of booleans.
Args:
pattern: A list of True/False values. Typically, this is a list
with a length matching the length of the Data Collections values
but it can also be a pattern to be repeated over the Data Collection.
Return:
A new Data Collection with filtered data
|
def filter_by_pattern(self, pattern):
_filt_values, _filt_datetimes = self._filter_by_pattern(pattern)
collection = HourlyDiscontinuousCollection(
self.header.duplicate(), _filt_values, _filt_datetimes)
collection._validated_a_period = True
return collection
| 435,262
|
Filter the Data Collection based on an analysis period.
Args:
analysis_period: A Ladybug analysis period
Return:
A new Data Collection with filtered data
|
def filter_by_analysis_period(self, analysis_period):
self._check_analysis_period(analysis_period)
analysis_period = self._get_analysis_period_subset(analysis_period)
if analysis_period.st_hour == 0 and analysis_period.end_hour == 23:
# We can still return an Hourly Continuous Data Collection
t_s = 60 / analysis_period.timestep
st_ind = int((analysis_period.st_time.moy / t_s) -
(self.header.analysis_period.st_time.moy / t_s))
end_ind = int((analysis_period.end_time.moy / t_s) -
(analysis_period.st_time.moy / t_s) + st_ind + 1)
if end_ind > st_ind:
_filt_values = self._values[st_ind:end_ind]
else:
_filt_values = self._values[st_ind:] + self._values[:end_ind]
_filt_header = self.header.duplicate()
_filt_header._analysis_period = analysis_period
return HourlyContinuousCollection(_filt_header, _filt_values)
else:
# Filter using HOYs and the result cannot be continuous
_filtered_data = self.filter_by_moys(analysis_period.moys)
_filtered_data.header._analysis_period = analysis_period
return _filtered_data
| 435,263
|
Filter the Data Collection based on a list of hoys.
Args:
hoys: A List of hours of the year 0..8759
Return:
A new Data Collection with filtered data
|
def filter_by_hoys(self, hoys):
existing_hoys = self.header.analysis_period.hoys
hoys = [h for h in hoys if h in existing_hoys]
_moys = tuple(int(hour * 60) for hour in hoys)
return self.filter_by_moys(_moys)
| 435,264
|
Filter the Data Collection based on a list of minutes of the year.
Args:
moys: A List of minutes of the year [0..8759 * 60]
Return:
A new Data Collection with filtered data
|
def filter_by_moys(self, moys):
t_s = 60 / self.header.analysis_period.timestep
st_ind = self.header.analysis_period.st_time.moy / t_s
if self.header.analysis_period.is_reversed is False:
_filt_indices = [int(moy / t_s - st_ind) for moy in moys]
else:
if self.header.analysis_period.is_leap_year is False:
eoy_ind = 8759 * self.header.analysis_period.timestep - st_ind
else:
eoy_ind = 8783 * self.header.analysis_period.timestep - st_ind
_filt_indices = []
for moy in moys:
ind = moy / t_s
if ind > st_ind:
_filt_indices.append(int(ind - st_ind))
else:
_filt_indices.append(int(ind + eoy_ind))
_filt_values = [self._values[i] for i in _filt_indices]
_filt_datetimes = [self.datetimes[i] for i in _filt_indices]
_filt_header = self.header.duplicate()
coll = HourlyDiscontinuousCollection(_filt_header, _filt_values, _filt_datetimes)
coll._validated_a_period = True
return coll
| 435,265
|
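The index arithmetic in the continuous `filter_by_moys` maps a minute-of-year onto a list position; a worked example for an hourly (timestep = 1) collection whose analysis period starts at Jan 1, 00:00 (moy 0):

```python
t_s = 60 / 1          # minutes per time step
st_ind = 0 / t_s      # offset of the analysis period start (moy 0)
moy = 9 * 60          # Jan 1, 9:00 AM expressed as a minute of the year
print(int(moy / t_s - st_ind))  # -> 9, i.e. the tenth value in the collection
```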
Check if this Data Collection is aligned with another.
Aligned Data Collections are of the same Data Collection class,
have the same number of values and have matching datetimes.
Args:
data_collection: The Data Collection which you want to test if this
collection is aligned with.
Return:
True if collections are aligned, False if not aligned
|
def is_collection_aligned(self, data_collection):
if self._collection_type != data_collection._collection_type:
return False
elif len(self.values) != len(data_collection.values):
return False
elif self.header.analysis_period != data_collection.header.analysis_period:
return False
return True
| 435,269
|
Filter the Data Collection based on an analysis period.
Args:
analysis_period: A Ladybug analysis period
Return:
A new Data Collection with filtered data
|
def filter_by_analysis_period(self, analysis_period):
self._check_analysis_period(analysis_period)
_filtered_data = self.filter_by_doys(analysis_period.doys_int)
_filtered_data.header._analysis_period = analysis_period
return _filtered_data
| 435,273
|
Filter the Data Collection based on a list of days of the year (as integers).
Args:
doys: A List of days of the year [1..365]
Return:
A new Data Collection with filtered data
|
def filter_by_doys(self, doys):
_filt_values = []
_filt_datetimes = []
for i, d in enumerate(self.datetimes):
if d in doys:
_filt_datetimes.append(d)
_filt_values.append(self._values[i])
_filt_header = self.header.duplicate()
return DailyCollection(_filt_header, _filt_values, _filt_datetimes)
| 435,274
|
Filter the Data Collection based on an analysis period.
Args:
analysis_period: A Ladybug analysis period
Return:
A new Data Collection with filtered data
|
def filter_by_analysis_period(self, analysis_period):
_filtered_data = self.filter_by_months(analysis_period.months_int)
_filtered_data.header._analysis_period = analysis_period
return _filtered_data
| 435,278
|
Filter the Data Collection based on a list of months of the year (as integers).
Args:
months: A List of months of the year [1..12]
Return:
A new Data Collection with filtered data
|
def filter_by_months(self, months):
_filt_values = []
_filt_datetimes = []
for i, d in enumerate(self.datetimes):
if d in months:
_filt_datetimes.append(d)
_filt_values.append(self._values[i])
_filt_header = self.header.duplicate()
return MonthlyCollection(_filt_header, _filt_values, _filt_datetimes)
| 435,279
|
Filter the Data Collection based on an analysis period.
Args:
analysis_period: A Ladybug analysis period
Return:
A new Data Collection with filtered data
|
def filter_by_analysis_period(self, analysis_period):
_filtered_data = self.filter_by_months_per_hour(
analysis_period.months_per_hour)
_filtered_data.header._analysis_period = analysis_period
return _filtered_data
| 435,283
|
Filter the Data Collection based on a list of months per hour (as tuples).
Args:
months_per_hour: A list of tuples representing months per hour.
Each tuple should possess two values: the first is the month
and the second is the hour. (e.g. (12, 23) = December at 11 PM)
Return:
A new Data Collection with filtered data
|
def filter_by_months_per_hour(self, months_per_hour):
_filt_values = []
_filt_datetimes = []
for i, d in enumerate(self.datetimes):
if d in months_per_hour:
_filt_datetimes.append(d)
_filt_values.append(self._values[i])
return MonthlyPerHourCollection(
self.header.duplicate(), _filt_values, _filt_datetimes)
| 435,284
|
Create a location from a dictionary.
Args:
data: {
"city": "-",
"latitude": 0,
"longitude": 0,
"time_zone": 0,
"elevation": 0}
|
def from_json(cls, data):
optional_keys = ('city', 'state', 'country', 'latitude', 'longitude',
'time_zone', 'elevation', 'station_id', 'source')
for key in optional_keys:
if key not in data:
data[key] = None
return cls(data['city'], data['state'], data['country'], data['latitude'],
data['longitude'], data['time_zone'], data['elevation'],
data['station_id'], data['source'])
| 435,293
|
Try to create a Ladybug location from a location string.
Args:
location: A location string.
Usage:
l = Location.from_location(location_string)
|
def from_location(cls, location):
if not location:
return cls()
try:
if hasattr(location, 'isLocation'):
# Ladybug location
return location
elif hasattr(location, 'Latitude'):
# Revit's location
return cls(city=str(location.Name.replace(",", " ")),
latitude=location.Latitude,
longitude=location.Longitude)
elif location.startswith('Site:'):
loc, city, latitude, longitude, time_zone, elevation = \
[x.strip() for x in re.findall(r'\r*\n*([^\r\n]*)[,|;]',
location, re.DOTALL)]
else:
try:
city, latitude, longitude, time_zone, elevation = \
[key.split(":")[-1].strip()
for key in location.split(",")]
except ValueError:
# it's just the city name
return cls(city=location)
return cls(city=city, country=None, latitude=latitude,
longitude=longitude, time_zone=time_zone,
elevation=elevation)
except Exception as e:
raise ValueError(
"Failed to create a Location from %s!\n%s" % (location, e))
| 435,294
|
Initialize an EPW object from a local .epw file.
Args:
file_path: Local file address to an .epw file.
|
def __init__(self, file_path):
self._file_path = os.path.normpath(file_path) if file_path is not None else None
self._is_header_loaded = False
self._is_data_loaded = False
self._is_ip = False # track if collections have been converted to IP
# placeholders for the EPW data that will be imported
self._data = []
self._metadata = {}
self._heating_dict = {}
self._cooling_dict = {}
self._extremes_dict = {}
self._extreme_hot_weeks = {}
self._extreme_cold_weeks = {}
self._typical_weeks = {}
self._monthly_ground_temps = {}
self._is_leap_year = False
self.daylight_savings_start = '0'
self.daylight_savings_end = '0'
self.comments_1 = ''
self.comments_2 = ''
self._num_of_fields = 35
| 435,303
|
Save epw object as an epw file.
Args:
file_path: A string representing the path to write the epw file to.
|
def save(self, file_path):
# load data if it's not loaded; convert to SI if it is in IP
if not self.is_data_loaded:
self._import_data()
originally_ip = False
if self.is_ip is True:
self.convert_to_si()
originally_ip = True
# write the file
lines = self.header
try:
# if the first value is at 1AM, move first item to end position
for field in xrange(0, self._num_of_fields):
point_in_time = self._data[field].header.data_type.point_in_time
if point_in_time is True:
first_hour = self._data[field]._values.pop(0)
self._data[field]._values.append(first_hour)
annual_a_per = AnalysisPeriod(is_leap_year=self.is_leap_year)
for hour in xrange(0, len(annual_a_per.datetimes)):
line = []
for field in xrange(0, self._num_of_fields):
line.append(str(self._data[field]._values[hour]))
lines.append(",".join(line) + "\n")
except IndexError:
# cleaning up
length_error_msg = 'Data length is not for a full year and cannot be ' + \
'saved as an EPW file.'
raise ValueError(length_error_msg)
else:
file_data = ''.join(lines)
write_to_file(file_path, file_data, True)
finally:
del lines
# move last item to start position for fields on the hour
for field in xrange(0, self._num_of_fields):
point_in_time = self._data[field].header.data_type.point_in_time
if point_in_time is True:
last_hour = self._data[field]._values.pop()
self._data[field]._values.insert(0, last_hour)
if originally_ip is True:
self.convert_to_ip()
return file_path
| 435,325
|
Return a data field by field number.
This is a useful method to get the values for fields that Ladybug
currently doesn't import by default. You can find list of fields by typing
EPWFields.fields
Args:
field_number: a value between 0 and 34 for different available epw fields.
Returns:
An annual Ladybug list
|
def _get_data_by_field(self, field_number):
if not self.is_data_loaded:
self._import_data()
# check input data
if not 0 <= field_number < self._num_of_fields:
raise ValueError("Field number should be between 0-%d" % self._num_of_fields)
return self._data[field_number]
| 435,327
|
Write a wea file from the epw file.
WEA carries radiation values from epw. Gendaymtx uses these values to
generate the sky. For an annual analysis it is identical to using epw2wea.
Args:
file_path: Full file path for output file.
hoys: List of hours of the year. Default is 0-8759.
|
def to_wea(self, file_path, hoys=None):
hoys = hoys or xrange(len(self.direct_normal_radiation.datetimes))
if not file_path.lower().endswith('.wea'):
file_path += '.wea'
originally_ip = False
if self.is_ip is True:
self.convert_to_si()
originally_ip = True
# write header
lines = [self._get_wea_header()]
# write values
datetimes = self.direct_normal_radiation.datetimes
for hoy in hoys:
dir_rad = self.direct_normal_radiation[hoy]
dif_rad = self.diffuse_horizontal_radiation[hoy]
line = "%d %d %.3f %d %d\n" \
% (datetimes[hoy].month,
datetimes[hoy].day,
datetimes[hoy].hour + 0.5,
dir_rad, dif_rad)
lines.append(line)
file_data = ''.join(lines)
write_to_file(file_path, file_data, True)
if originally_ip is True:
self.convert_to_ip()
return file_path
| 435,330
|
Check if time is included in analysis period.
Return True if time is inside this analysis period,
otherwise return False
Args:
time: A DateTime to be tested
Returns:
A boolean. True if time is included in analysis period
|
def is_time_included(self, time):
if self._timestamps_data is None:
self._calculate_timestamps()
# time filtering in Ladybug Tools is slightly different than "normal"
# filtering since start hour and end hour will be applied for every day.
# For instance 2/20 9am to 2/22 5pm means hour between 9-17
# during 20, 21 and 22 of Feb.
return time.moy in self._timestamps_data
| 435,347
|
Create wea object from a wea file.
Args:
weafile: Full path to the wea file.
timestep: An optional integer to set the number of time steps per hour.
Default is 1 for one value per hour. If the wea file has a time step
smaller than an hour adjust this input accordingly.
is_leap_year: A boolean to indicate if values are representing a leap year.
Default is False.
|
def from_file(cls, weafile, timestep=1, is_leap_year=False):
assert os.path.isfile(weafile), 'Failed to find {}'.format(weafile)
location = Location()
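# 'readmode' below is assumed to be defined at module level (e.g. 'r' or
# 'rb' depending on the Python implementation)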
with open(weafile, readmode) as weaf:
first_line = weaf.readline()
assert first_line.startswith('place'), \
'Failed to find place in header. ' \
'{} is not a valid wea file.'.format(weafile)
location.city = ' '.join(first_line.split()[1:])
# parse header
location.latitude = float(weaf.readline().split()[-1])
location.longitude = -float(weaf.readline().split()[-1])
location.time_zone = -int(weaf.readline().split()[-1]) / 15
location.elevation = float(weaf.readline().split()[-1])
weaf.readline() # pass line for weather data units
# parse irradiance values
direct_normal_irradiance = []
diffuse_horizontal_irradiance = []
for line in weaf:
dirn, difh = [int(v) for v in line.split()[-2:]]
direct_normal_irradiance.append(dirn)
diffuse_horizontal_irradiance.append(difh)
return cls.from_values(location, direct_normal_irradiance,
diffuse_horizontal_irradiance, timestep, is_leap_year)
| 435,358
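The sign conventions in the header parsing deserve a standalone illustration: wea files store longitude and time zone as degrees west of the meridian, so both are negated on the way in. The sample header below is hypothetical but follows the wea format:

sample_header = """place Denver_CO_USA
latitude 39.76
longitude 104.86
time_zone 105
site_elevation 1610.0
weather_data_file_units 1"""

lines = sample_header.splitlines()
city = ' '.join(lines[0].split()[1:])
latitude = float(lines[1].split()[-1])
longitude = -float(lines[2].split()[-1])     # west-positive in wea -> negate
time_zone = -int(lines[3].split()[-1]) / 15  # degrees west -> hours from UTC
print(city, latitude, longitude, time_zone)  # Denver_CO_USA 39.76 -104.86 -7.0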
|
Create an ASHRAE Revised Clear Sky wea object from the monthly sky
optical depths in a .stat file.
Args:
statfile: Full path to the .stat file.
timestep: An optional integer to set the number of time steps per
hour. Default is 1 for one value per hour.
is_leap_year: A boolean to indicate if values are representing a leap year.
Default is False.
|
def from_stat_file(cls, statfile, timestep=1, is_leap_year=False):
stat = STAT(statfile)
# check to be sure the stat file does not have missing tau values
def check_missing(opt_data, data_name):
if not opt_data:
raise ValueError('Stat file contains no optical data.')
for i, x in enumerate(opt_data):
if x is None:
raise ValueError(
'Missing optical depth data for {} at month {}'.format(
data_name, i + 1)
)
check_missing(stat.monthly_tau_beam, 'monthly_tau_beam')
check_missing(stat.monthly_tau_diffuse, 'monthly_tau_diffuse')
return cls.from_ashrae_revised_clear_sky(stat.location, stat.monthly_tau_beam,
stat.monthly_tau_diffuse, timestep,
is_leap_year)
| 435,360
|
Unflatten a flattened generator.
Args:
guide: A guide list to follow the structure
flattened_input: A flattened iterator object
Usage:
guide = [["a"], ["b","c","d"], [["e"]], ["f"]]
input_list = [0, 1, 2, 3, 4, 5, 6, 7]
unflatten(guide, iter(input_list))
>> [[0], [1, 2, 3], [[4]], [5]]
|
def unflatten(guide, flattened_input):
return [unflatten(sub_list, flattened_input) if isinstance(sub_list, list)
else next(flattened_input) for sub_list in guide]
| 435,377
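Because unflatten is self-contained, its behavior is easy to verify directly; note that it consumes only as many items from the iterator as the guide has leaves:

def unflatten(guide, flattened_input):
    return [unflatten(sub_list, flattened_input) if isinstance(sub_list, list)
            else next(flattened_input) for sub_list in guide]

guide = [["a"], ["b", "c", "d"], [["e"]], ["f"]]
values = iter(range(8))
print(unflatten(guide, values))  # [[0], [1, 2, 3], [[4]], [5]]
print(next(values))              # 6 -- two items of the input remain unconsumed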
|
Get Sun data for an hour of the year.
Args:
month: An integer between 1-12
day: An integer between 1-31
hour: A number between 0-23
is_solar_time: A boolean to indicate if the input hour is solar time.
(Default: False)
Returns:
A sun object for this particular time
|
def calculate_sun(self, month, day, hour, is_solar_time=False):
datetime = DateTime(month, day, *self._calculate_hour_and_minute(hour),
leap_year=self.is_leap_year)
return self.calculate_sun_from_date_time(datetime, is_solar_time)
| 435,391
|
Get Sun data for an hour of the year.
Args:
hoy: A float hour of the year (0 <= hoy < 8760)
is_solar_time: A boolean to indicate if the input hour is solar time
(Default: False).
Returns:
A sun object for this particular time
|
def calculate_sun_from_hoy(self, hoy, is_solar_time=False):
datetime = DateTime.from_hoy(hoy, self.is_leap_year)
return self.calculate_sun_from_date_time(datetime, is_solar_time)
| 435,392
|
Get Sun for an hour of the year.
This code was originally written by Trygve Wastvedt
(Trygve.Wastvedt@gmail.com) based on NOAA solar calculations and modified
by Chris Mackey and Mostapha Roudsari
Args:
datetime: Ladybug datetime
is_solar_time: A boolean to indicate if the input hour is solar time.
(Default: False)
Returns:
A sun object for this particular time
|
def calculate_sun_from_date_time(self, datetime, is_solar_time=False):
# TODO(mostapha): This should be more generic and based on a method
if datetime.year != 2016 and self.is_leap_year:
datetime = DateTime(datetime.month, datetime.day, datetime.hour,
datetime.minute, True)
sol_dec, eq_of_time = self._calculate_solar_geometry(datetime)
hour = datetime.float_hour
is_daylight_saving = self.is_daylight_saving_hour(datetime.hoy)
hour = hour + 1 if is_daylight_saving else hour
# minutes
sol_time = self._calculate_solar_time(hour, eq_of_time, is_solar_time) * 60
# degrees
if sol_time / 4 < 0:
hour_angle = sol_time / 4 + 180
else:
hour_angle = sol_time / 4 - 180
# Degrees
zenith = math.degrees(math.acos(
math.sin(self._latitude) * math.sin(math.radians(sol_dec)) +
math.cos(self._latitude) * math.cos(math.radians(sol_dec)) *
math.cos(math.radians(hour_angle))))
altitude = 90 - zenith
# Approx Atmospheric Refraction
if altitude > 85:
atmos_refraction = 0
elif altitude > 5:
# parentheses keep the continuation lines as part of one assignment;
# without them the '-' and '+' lines are discarded as no-op statements
atmos_refraction = (58.1 / math.tan(math.radians(altitude)) -
0.07 / (math.tan(math.radians(altitude))) ** 3 +
0.000086 / (math.tan(math.radians(altitude))) ** 5)
elif altitude > -0.575:
atmos_refraction = (1735 +
altitude * (-518.2 + altitude *
(103.4 + altitude *
(-12.79 + altitude * 0.711))))
else:
atmos_refraction = -20.772 / math.tan(math.radians(altitude))
atmos_refraction /= 3600
altitude += atmos_refraction
# Degrees
if hour_angle > 0:
azimuth = (math.degrees(
math.acos(
(
(math.sin(self._latitude) *
math.cos(math.radians(zenith))) -
math.sin(math.radians(sol_dec))) /
(math.cos(self._latitude) *
math.sin(math.radians(zenith)))
)
) + 180) % 360
else:
azimuth = (540 - math.degrees(math.acos((
(math.sin(self._latitude) *
math.cos(math.radians(zenith))) -
math.sin(math.radians(sol_dec))) /
(math.cos(self._latitude) *
math.sin(math.radians(zenith))))
)) % 360
altitude = math.radians(altitude)
azimuth = math.radians(azimuth)
# create the sun for this hour
return Sun(datetime, altitude, azimuth, is_solar_time, is_daylight_saving,
self.north_angle)
| 435,393
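A hedged usage sketch of the solar position calculation, assuming the ladybug Sunpath API; the location values are illustrative:

from ladybug.location import Location
from ladybug.sunpath import Sunpath

loc = Location('Denver', 'USA', latitude=39.76, longitude=-104.86, time_zone=-7)
sp = Sunpath.from_location(loc)
sun = sp.calculate_sun(month=6, day=21, hour=12)
print(sun.altitude, sun.azimuth)  # solar position angles at noon on the solstice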
|
Create a DDY from a dictionary.
Args:
data = {
"location": ladybug Location schema,
"design_days": [] // list of ladybug DesignDay schemas}
|
def from_json(cls, data):
required_keys = ('location', 'design_days')
for key in required_keys:
assert key in data, 'Required key "{}" is missing!'.format(key)
return cls(Location.from_json(data['location']),
[DesignDay.from_json(des_day) for des_day in data['design_days']])
| 435,421
|
Initialize a ddy object from an existing ddy file.
args:
file_path: A string representing a complete path to the .ddy file.
|
def from_ddy_file(cls, file_path):
# check that the file is there
if not os.path.isfile(file_path):
raise ValueError(
'Cannot find a .ddy file at {}'.format(file_path))
if not file_path.lower().endswith('.ddy'):
raise ValueError(
'DDY file does not have a .ddy extension.')
# check the python version and open the file
try:
iron_python = platform.python_implementation() == 'IronPython'
except Exception:
iron_python = True
if iron_python:
ddywin = codecs.open(file_path, 'r')
else:
ddywin = codecs.open(file_path, 'r', encoding='utf-8', errors='ignore')
try:
ddytxt = ddywin.read()
location_format = re.compile(
r"(Site:Location,(.|\n)*?((;\s*!)|(;\s*\n)|(;\n)))")
design_day_format = re.compile(
r"(SizingPeriod:DesignDay,(.|\n)*?((;\s*!)|(;\s*\n)|(;\n)))")
location_matches = location_format.findall(ddytxt)
des_day_matches = design_day_format.findall(ddytxt)
except Exception as e:
import traceback
raise Exception('{}\n{}'.format(e, traceback.format_exc()))
else:
# check to be sure location was found
assert len(location_matches) > 0, 'No location objects found ' \
'in .ddy file.'
# build design day and location objects
location = Location.from_location(location_matches[0][0])
design_days = [DesignDay.from_ep_string(
match[0], location) for match in des_day_matches]
finally:
ddywin.close()
cls_ = cls(location, design_days)
cls_._file_path = os.path.normpath(file_path)
return cls_
| 435,422
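The two regular expressions anchor on the terminating semicolon of each EnergyPlus object (optionally followed by an inline comment). A standalone demonstration on a hypothetical snippet:

import re

sample = ("Site:Location,\n"
          "  Denver Intl AP,  !- Name\n"
          "  39.83,           !- Latitude\n"
          "  -104.65,         !- Longitude\n"
          "  -7.0,            !- Time Zone\n"
          "  1650;            !- Elevation\n")
location_format = re.compile(r"(Site:Location,(.|\n)*?((;\s*!)|(;\s*\n)|(;\n)))")
matches = location_format.findall(sample)
print(matches[0][0])  # prints the matched Site:Location block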
|
Save ddy object as a .ddy file.
args:
file_path: A string representing the path to write the ddy file to.
|
def save(self, file_path):
# assemble the location and design day strings and write them to the file
data = self.location.ep_style_location_string + '\n\n'
for d_day in self.design_days:
data = data + d_day.ep_style_string + '\n\n'
write_to_file(file_path, data, True)
| 435,423
|
Create a Design Day from a dictionary.
Args:
data = {
"name": string,
"day_type": string,
"location": ladybug Location schema,
"dry_bulb_condition": ladybug DryBulbCondition schema,
"humidity_condition": ladybug HumidityCondition schema,
"wind_condition": ladybug WindCondition schema,
"sky_condition": ladybug SkyCondition schema}
|
def from_json(cls, data):
required_keys = ('name', 'day_type', 'location', 'dry_bulb_condition',
'humidity_condition', 'wind_condition', 'sky_condition')
for key in required_keys:
assert key in data, 'Required key "{}" is missing!'.format(key)
return cls(data['name'], data['day_type'], Location.from_json(data['location']),
DryBulbCondition.from_json(data['dry_bulb_condition']),
HumidityCondition.from_json(data['humidity_condition']),
WindCondition.from_json(data['wind_condition']),
SkyCondition.from_json(data['sky_condition']))
| 435,430
|
Initialize from an EnergyPlus string of a SizingPeriod:DesignDay.
args:
ep_string: A full string representing a SizingPeriod:DesignDay.
|
def from_ep_string(cls, ep_string, location):
# format the object into a list of properties
ep_string = ep_string.strip()
if '\n' in ep_string:
ep_lines = ep_string.split('\n')
else:
ep_lines = ep_string.split('\r')
lines = [l.split('!')[0].strip().replace(',', '') for l in ep_lines]
# check to be sure that we have a valid ddy object
assert len(lines) == 27 or len(lines) == 26, "Number " \
"of lines of text [{}] does not correspond" \
" to an EP Design Day [26 or 27]".format(
len(lines))
lines[-1] = lines[-1].split(';')[0]
# extract primary properties
name = lines[1]
day_type = lines[4]
# extract dry bulb temperatures
dry_bulb_condition = DryBulbCondition(
float(lines[5]), float(lines[6]), lines[7], lines[8])
# extract humidity conditions
h_type = lines[9]
h_val = 0 if lines[10] == '' else float(lines[10])
if h_type == 'HumidityRatio':
h_val = float(lines[12])
elif h_type == 'Enthalpy':
h_val = float(lines[13])
humidity_condition = HumidityCondition(
h_type, h_val, float(lines[15]), lines[11])
# extract wind conditions
wind_condition = WindCondition(
float(lines[16]), float(lines[17]), lines[18], lines[19])
# extract the sky conditions
sky_model = lines[21]
if sky_model == 'ASHRAEClearSky':
sky_condition = OriginalClearSkyCondition(
int(lines[2]), int(lines[3]), float(lines[26]), lines[20])
elif sky_model == 'ASHRAETau':
sky_condition = RevisedClearSkyCondition(
int(lines[2]), int(lines[3]), float(lines[24]),
float(lines[25]), lines[20])
else:
sky_condition = SkyCondition(
sky_model, int(lines[2]), int(lines[3]), lines[20])
if sky_model == 'Schedule':
sky_condition.beam_shced = lines[22]
sky_condition.diff_shced = lines[23]
return cls(name, day_type, location, dry_bulb_condition,
humidity_condition, wind_condition, sky_condition)
| 435,431
|
Create a Humidity Condition from a dictionary.
Args:
data = {
"hum_type": string,
"hum_value": float,
"barometric_pressure": float,
"schedule": string,
"wet_bulb_range": string}
|
def from_json(cls, data):
# Check required and optional keys
required_keys = ('hum_type', 'hum_value')
optional_keys = {'barometric_pressure': 101325,
'schedule': '', 'wet_bulb_range': ''}
for key in required_keys:
assert key in data, 'Required key "{}" is missing!'.format(key)
for key, val in optional_keys.items():
if key not in data:
data[key] = val
return cls(data['hum_type'], data['hum_value'], data['barometric_pressure'],
data['schedule'], data['wet_bulb_range'])
| 435,456
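The required/optional key handling repeated across these from_json constructors can be factored into a small helper; this is a refactoring sketch only, not part of the library:

def fill_defaults(data, required, optional):
    """Assert required keys exist and fill missing optional keys in place."""
    for key in required:
        assert key in data, 'Required key "{}" is missing!'.format(key)
    for key, val in optional.items():
        data.setdefault(key, val)
    return data

data = fill_defaults({'hum_type': 'Wetbulb', 'hum_value': 23.0},
                     ('hum_type', 'hum_value'),
                     {'barometric_pressure': 101325, 'schedule': '',
                      'wet_bulb_range': ''})
print(data['barometric_pressure'])  # 101325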
|
Get a list of dew points (C) at each hour over the design day.
args:
dry_bulb_condition: The dry bulb condition for the day.
|
def hourly_dew_point_values(self, dry_bulb_condition):
hourly_dew_point = []
max_dpt = self.dew_point(dry_bulb_condition.dry_bulb_max)
for db in dry_bulb_condition.hourly_values:
if db >= max_dpt:
hourly_dew_point.append(max_dpt)
else:
hourly_dew_point.append(db)
return hourly_dew_point
| 435,457
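The loop caps the constant daily dew point at the hourly dry bulb, since dew point can never exceed dry bulb temperature. The same result on sample values:

max_dpt = 18.0  # dew point derived from the day's maximum dry bulb (deg C)
hourly_db = [15.0, 17.0, 19.0, 24.0, 21.0, 16.5]
hourly_dpt = [min(db, max_dpt) for db in hourly_db]
print(hourly_dpt)  # [15.0, 17.0, 18.0, 18.0, 18.0, 16.5]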
|
Get the dew point (C), which is constant throughout the day (except at saturation).
args:
db: The maximum dry bulb temperature over the day.
|
def dew_point(self, db):
if self._hum_type == 'Dewpoint':
return self._hum_value
elif self._hum_type == 'Wetbulb':
return dew_point_from_db_wb(
db, self._hum_value, self._barometric_pressure)
elif self._hum_type == 'HumidityRatio':
return dew_point_from_db_hr(
db, self._hum_value, self._barometric_pressure)
elif self._hum_type == 'Enthalpy':
return dew_point_from_db_enth(
db, self._hum_value / 1000, self._barometric_pressure)
| 435,458
|
Create a Wind Condition from a dictionary.
Args:
data = {
"wind_speed": float,
"wind_direction": float,
"rain": bool,
"snow_on_ground": bool}
|
def from_json(cls, data):
# Check required and optional keys
optional_keys = {'wind_direction': 0, 'rain': False, 'snow_on_ground': False}
assert 'wind_speed' in data, 'Required key "wind_speed" is missing!'
for key, val in optional_keys.items():
if key not in data:
data[key] = val
return cls(data['wind_speed'], data['wind_direction'], data['rain'],
data['snow_on_ground'])
| 435,464
|
Create a Sky Condition from a dictionary.
Args:
data = {
"solar_model": string,
"month": int,
"day_of_month": int,
"daylight_savings_indicator": string // "Yes" or "No"}
|
def from_json(cls, data):
# Check required and optional keys
required_keys = ('solar_model', 'month', 'day_of_month')
for key in required_keys:
assert key in data, 'Required key "{}" is missing!'.format(key)
if data['solar_model'] == 'ASHRAEClearSky':
return OriginalClearSkyCondition.from_json(data)
if data['solar_model'] == 'ASHRAETau':
return RevisedClearSkyCondition.from_json(data)
if 'daylight_savings_indicator' not in data:
data['daylight_savings_indicator'] = 'No'
optional_keys = ('beam_shced', 'diff_sched')
for key in optional_keys:
if key not in data:
data[key] = ''
return cls(data['solar_model'], data['month'], data['day_of_month'],
data['daylight_savings_indicator'],
data['beam_shced'], data['diff_sched'])
| 435,471
|
Create a Sky Condition from a dictionary.
Args:
data = {
"solar_model": string,
"month": int,
"day_of_month": int,
"clearness": float,
"daylight_savings_indicator": string // "Yes" or "No"}
|
def from_json(cls, data):
# Check required and optional keys
required_keys = ('solar_model', 'month', 'day_of_month', 'clearness')
for key in required_keys:
assert key in data, 'Required key "{}" is missing!'.format(key)
if 'daylight_savings_indicator' not in data:
data['daylight_savings_indicator'] = 'No'
return cls(data['month'], data['day_of_month'], data['clearness'],
data['daylight_savings_indicator'])
| 435,480
|
Initialize base collection.
Args:
header: A Ladybug Header object.
values: A list of values.
datetimes: A list of Ladybug DateTime objects that aligns with
the list of values.
|
def __init__(self, header, values, datetimes):
assert isinstance(header, Header), \
'header must be a Ladybug Header object. Got {}'.format(type(header))
assert isinstance(datetimes, Iterable) \
and not isinstance(datetimes, (str, dict, bytes, bytearray)), \
'datetimes should be a list or tuple. Got {}'.format(type(datetimes))
self._header = header
self._datetimes = tuple(datetimes)
self.values = values
self._validated_a_period = False
| 435,489
|
Create a Data Collection from a dictionary.
Args:
{
"header": A Ladybug Header,
"values": An array of values,
"datetimes": An array of datetimes,
"validated_a_period": Boolean for whether header analysis_period is valid
}
|
def from_json(cls, data):
assert 'header' in data, 'Required keyword "header" is missing!'
assert 'values' in data, 'Required keyword "values" is missing!'
assert 'datetimes' in data, 'Required keyword "datetimes" is missing!'
coll = cls(Header.from_json(data['header']), data['values'], data['datetimes'])
if 'validated_a_period' in data:
coll._validated_a_period = data['validated_a_period']
return coll
| 435,490
|
Get a value representing the input percentile of the Data Collection.
Args:
percentile: A float value from 0 to 100 representing the
requested percentile.
Return:
The Data Collection value at the input percentile
|
def get_percentile(self, percentile):
assert 0 <= percentile <= 100, \
'percentile must be between 0 and 100. Got {}'.format(percentile)
return self._percentile(self._values, percentile)
| 435,499
|
Filter the Data Collection based on a conditional statement.
Args:
statement: A conditional statement as a string (e.g. a > 25 and a%5 == 0).
The variable should always be named as 'a' (without quotations).
Return:
A new Data Collection containing only the filtered data
|
def filter_by_conditional_statement(self, statement):
_filt_values, _filt_datetimes = self._filter_by_statement(statement)
if self._enumeration is None:
self._get_mutable_enumeration()
col_obj = self._enumeration['mutable'][self._collection_type]
collection = col_obj(self.header.duplicate(), _filt_values, _filt_datetimes)
collection._validated_a_period = self._validated_a_period
return collection
| 435,500
|
Filter the Data Collection based on a list of booleans.
Args:
pattern: A list of True/False values. Typically, this is a list
with a length matching the length of the Data Collections values
but it can also be a pattern to be repeated over the Data Collection.
Return:
A new Data Collection with filtered data
|
def filter_by_pattern(self, pattern):
_filt_values, _filt_datetimes = self._filter_by_pattern(pattern)
if self._enumeration is None:
self._get_mutable_enumeration()
col_obj = self._enumeration['mutable'][self._collection_type]
collection = col_obj(self.header.duplicate(), _filt_values, _filt_datetimes)
collection._validated_a_period = self._validated_a_period
return collection
| 435,501
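A standalone sketch of the repeating-pattern behavior the docstring describes (the private _filter_by_pattern is assumed to cycle a short pattern over the values):

from itertools import cycle

values = [20.1, 20.5, 21.0, 21.4, 22.0, 22.3]
pattern = [True, False]  # shorter than the values, so it repeats
filtered = [v for v, keep in zip(values, cycle(pattern)) if keep]
print(filtered)  # [20.1, 21.0, 22.0]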
|
Check if this Data Collection is aligned with another.
Aligned Data Collections are of the same Data Collection class, have the
same number of values and have matching datetimes.
Args:
data_collection: The Data Collection which you want to test if this
collection is aligned with.
Return:
True if collections are aligned, False if not aligned
|
def is_collection_aligned(self, data_collection):
if self._collection_type != data_collection._collection_type:
return False
elif len(self.values) != len(data_collection.values):
return False
elif self.datetimes != data_collection.datetimes:
return False
else:
return True
| 435,502
|
Test if a series of Data Collections are aligned with one another.
Aligned Data Collections are of the same Data Collection class, have the
same number of values and have matching datetimes.
Args:
data_collections: A list of Data Collections for which you want to
test if they are all aligned with one another.
raise_exception: Boolean to note whether an exception should be raised
if the collections are not aligned (Default: True).
Return:
True if collections are aligned, False if not aligned
|
def are_collections_aligned(data_collections, raise_exception=True):
if len(data_collections) > 1:
first_coll = data_collections[0]
for coll in data_collections[1:]:
if not first_coll.is_collection_aligned(coll):
if raise_exception is True:
error_msg = '{} Data Collection is not aligned with '\
'{} Data Collection.'.format(
first_coll.header.data_type, coll.header.data_type)
raise ValueError(error_msg)
return False
return True
| 435,508
|
Find the percentile of a list of values.
Args:
values: A list of values for which percentiles are desired
percent: A float value from 0 to 100 representing the requested percentile.
key: optional key function to compute value from each element of N.
Return:
The percentile of the values
|
def _percentile(self, values, percent, key=lambda x: x):
vals = sorted(values)
k = (len(vals) - 1) * (percent / 100.0)  # float division guards against integer inputs under Python 2
f = math.floor(k)
c = math.ceil(k)
if f == c:
return key(vals[int(k)])
d0 = key(vals[int(f)]) * (c - k)
d1 = key(vals[int(c)]) * (k - f)
return d0 + d1
| 435,516
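The interpolation step is clearer on concrete numbers; a self-contained copy of the logic (with the float-division fix applied):

import math

def percentile(values, percent, key=lambda x: x):
    vals = sorted(values)
    k = (len(vals) - 1) * (percent / 100.0)
    f, c = math.floor(k), math.ceil(k)
    if f == c:
        return key(vals[int(k)])
    # linearly interpolate between the two bracketing sorted values
    return key(vals[int(f)]) * (c - k) + key(vals[int(c)]) * (k - f)

print(percentile([1, 2, 3, 4], 50))  # 2.5 -> k = 1.5, halfway between 2 and 3
print(percentile([1, 2, 3, 4], 25))  # 1.75 -> k = 0.75, three quarters of the way from 1 to 2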
|
Create Ladybug datetime.
Args:
month: A value for month between 1-12 (Default: 1).
day: A value for day between 1-31 (Default: 1).
hour: A value for hour between 0-23 (Default: 0).
minute: A value for minute between 0-59 (Default: 0).
leap_year: A boolean to indicate if datetime is for a leap year
(Default: False).
|
def __new__(cls, month=1, day=1, hour=0, minute=0, leap_year=False):
year = 2016 if leap_year else 2017
hour, minute = cls._calculate_hour_and_minute(hour + minute / 60.0)
try:
return datetime.__new__(cls, year, month, day, hour, minute)
except ValueError as e:
raise ValueError("{}:\n\t({}/{}@{}:{})(m/d@h:m)".format(
e, month, day, hour, minute
))
| 435,522
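_calculate_hour_and_minute is defined elsewhere in the class; a sketch of its assumed behavior shows why a fractional hour can roll the hour over:

def calculate_hour_and_minute(float_hour):
    """Split a fractional hour into whole hours and minutes (assumed logic)."""
    hour = int(float_hour)
    minute = int(round((float_hour - hour) * 60))
    if minute == 60:  # e.g. 10.9999 rounds up to the next full hour
        return hour + 1, 0
    return hour, minute

print(calculate_hour_and_minute(12 + 90 / 60.0))  # (13, 30): 12h plus 90 minutes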
|
Create a datetime from a dictionary.
Args:
data: {
'month': A value for month between 1-12. (Default: 1)
'day': A value for day between 1-31. (Default: 1)
'hour': A value for hour between 0-23. (Default: 0)
'minute': A value for minute between 0-59. (Default: 0)
}
|
def from_json(cls, data):
if 'month' not in data:
data['month'] = 1
if 'day' not in data:
data['day'] = 1
if 'hour' not in data:
data['hour'] = 0
if 'minute' not in data:
data['minute'] = 0
if 'year' not in data:
data['year'] = 2017
leap_year = int(data['year']) == 2016
return cls(data['month'], data['day'], data['hour'], data['minute'], leap_year)
| 435,523
|
Create Ladybug Datetime from an hour of the year.
Args:
hoy: A float value such that 0 <= hoy < 8760
|
def from_hoy(cls, hoy, leap_year=False):
return cls.from_moy(round(hoy * 60), leap_year)
| 435,524
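A short usage sketch: hoy 12.5 is 12:30 on 1 January, which becomes minute-of-year round(12.5 * 60) == 750 before delegating to from_moy:

from ladybug.dt import DateTime

dt = DateTime.from_hoy(12.5)
print(dt)  # 2017-01-01 12:30:00 (2017 is the placeholder non-leap year)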