hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
794c7cbc7fcf3114024f3eb810617fb6f4785168
| 15,921
|
py
|
Python
|
talon_one/models/update_campaign.py
|
talon-one/talon_one.py
|
f863bb3c2cc5ddc94d9227adcf14947b2ea7db41
|
[
"MIT"
] | 1
|
2021-03-05T06:41:26.000Z
|
2021-03-05T06:41:26.000Z
|
talon_one/models/update_campaign.py
|
talon-one/talon_one.py
|
f863bb3c2cc5ddc94d9227adcf14947b2ea7db41
|
[
"MIT"
] | 1
|
2021-09-07T08:56:58.000Z
|
2021-09-07T08:56:58.000Z
|
talon_one/models/update_campaign.py
|
talon-one/talon_one.py
|
f863bb3c2cc5ddc94d9227adcf14947b2ea7db41
|
[
"MIT"
] | 1
|
2019-05-21T10:27:54.000Z
|
2019-05-21T10:27:54.000Z
|
# coding: utf-8
"""
Talon.One API
The Talon.One API is used to manage applications and campaigns, as well as to integrate with your application. The operations in the _Integration API_ section are used to integrate with our platform, while the other operations are used to manage applications and campaigns. ### Where is the API? The API is available at the same hostname as these docs. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerProfile][] operation is `https://mycompany.talon.one/v1/customer_profiles/id` [updateCustomerProfile]: #operation--v1-customer_profiles--integrationId--put # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from talon_one.configuration import Configuration
class UpdateCampaign(object):
    """Request model for updating a campaign via the Talon.One API.

    NOTE: This class was auto generated by OpenAPI Generator
    (https://openapi-generator.tech); changes here are limited to
    formatting, documentation, and removing the `six` dependency in
    `to_dict` (plain dict iteration is equivalent on Python 3).

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the API payload.
    """

    openapi_types = {
        'name': 'str',
        'description': 'str',
        'start_time': 'datetime',
        'end_time': 'datetime',
        'attributes': 'object',
        'state': 'str',
        'active_ruleset_id': 'int',
        'tags': 'list[str]',
        'features': 'list[str]',
        'coupon_settings': 'CodeGeneratorSettings',
        'referral_settings': 'CodeGeneratorSettings',
        'limits': 'list[LimitConfig]',
        'campaign_groups': 'list[int]'
    }

    attribute_map = {
        'name': 'name',
        'description': 'description',
        'start_time': 'startTime',
        'end_time': 'endTime',
        'attributes': 'attributes',
        'state': 'state',
        'active_ruleset_id': 'activeRulesetId',
        'tags': 'tags',
        'features': 'features',
        'coupon_settings': 'couponSettings',
        'referral_settings': 'referralSettings',
        'limits': 'limits',
        'campaign_groups': 'campaignGroups'
    }

    def __init__(self, name=None, description=None, start_time=None, end_time=None,
                 attributes=None, state='enabled', active_ruleset_id=None, tags=None,
                 features=None, coupon_settings=None, referral_settings=None,
                 limits=None, campaign_groups=None, local_vars_configuration=None):  # noqa: E501
        """UpdateCampaign - a model defined in OpenAPI.

        `name`, `tags`, `features`, and `limits` are required (their setters
        reject None when client_side_validation is enabled); the remaining
        fields are optional and left unset when not provided.
        """
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._name = None
        self._description = None
        self._start_time = None
        self._end_time = None
        self._attributes = None
        self._state = None
        self._active_ruleset_id = None
        self._tags = None
        self._features = None
        self._coupon_settings = None
        self._referral_settings = None
        self._limits = None
        self._campaign_groups = None
        self.discriminator = None

        self.name = name
        if description is not None:
            self.description = description
        if start_time is not None:
            self.start_time = start_time
        if end_time is not None:
            self.end_time = end_time
        if attributes is not None:
            self.attributes = attributes
        if state is not None:
            self.state = state
        if active_ruleset_id is not None:
            self.active_ruleset_id = active_ruleset_id
        self.tags = tags
        self.features = features
        if coupon_settings is not None:
            self.coupon_settings = coupon_settings
        if referral_settings is not None:
            self.referral_settings = referral_settings
        self.limits = limits
        if campaign_groups is not None:
            self.campaign_groups = campaign_groups

    @property
    def name(self):
        """A friendly name for this campaign.

        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this UpdateCampaign (required, min length 1)."""
        if self.local_vars_configuration.client_side_validation and name is None:  # noqa: E501
            raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501
        if (self.local_vars_configuration.client_side_validation and
                name is not None and len(name) < 1):
            raise ValueError("Invalid value for `name`, length must be greater than or equal to `1`")  # noqa: E501
        self._name = name

    @property
    def description(self):
        """A detailed description of the campaign.

        :rtype: str
        """
        return self._description

    @description.setter
    def description(self, description):
        """Sets the description of this UpdateCampaign."""
        self._description = description

    @property
    def start_time(self):
        """Datetime when the campaign will become active.

        :rtype: datetime
        """
        return self._start_time

    @start_time.setter
    def start_time(self, start_time):
        """Sets the start_time of this UpdateCampaign."""
        self._start_time = start_time

    @property
    def end_time(self):
        """Datetime when the campaign will become in-active.

        :rtype: datetime
        """
        return self._end_time

    @end_time.setter
    def end_time(self, end_time):
        """Sets the end_time of this UpdateCampaign."""
        self._end_time = end_time

    @property
    def attributes(self):
        """Arbitrary properties associated with this campaign.

        :rtype: object
        """
        return self._attributes

    @attributes.setter
    def attributes(self, attributes):
        """Sets the attributes of this UpdateCampaign."""
        self._attributes = attributes

    @property
    def state(self):
        """Campaign state; a disabled or archived campaign is not evaluated
        for rules or coupons.

        :rtype: str
        """
        return self._state

    @state.setter
    def state(self, state):
        """Sets the state of this UpdateCampaign (one of enabled/disabled/archived)."""
        allowed_values = ["enabled", "disabled", "archived"]  # noqa: E501
        if self.local_vars_configuration.client_side_validation and state not in allowed_values:  # noqa: E501
            raise ValueError(
                "Invalid value for `state` ({0}), must be one of {1}"  # noqa: E501
                .format(state, allowed_values)
            )
        self._state = state

    @property
    def active_ruleset_id(self):
        """ID of Ruleset this campaign applies on customer session evaluation.

        :rtype: int
        """
        return self._active_ruleset_id

    @active_ruleset_id.setter
    def active_ruleset_id(self, active_ruleset_id):
        """Sets the active_ruleset_id of this UpdateCampaign."""
        self._active_ruleset_id = active_ruleset_id

    @property
    def tags(self):
        """A list of tags for the campaign.

        :rtype: list[str]
        """
        return self._tags

    @tags.setter
    def tags(self, tags):
        """Sets the tags of this UpdateCampaign (required)."""
        if self.local_vars_configuration.client_side_validation and tags is None:  # noqa: E501
            raise ValueError("Invalid value for `tags`, must not be `None`")  # noqa: E501
        self._tags = tags

    @property
    def features(self):
        """A list of features for the campaign.

        :rtype: list[str]
        """
        return self._features

    @features.setter
    def features(self, features):
        """Sets the features of this UpdateCampaign (required; subset of
        coupons/referrals/loyalty)."""
        if self.local_vars_configuration.client_side_validation and features is None:  # noqa: E501
            raise ValueError("Invalid value for `features`, must not be `None`")  # noqa: E501
        allowed_values = ["coupons", "referrals", "loyalty"]  # noqa: E501
        if (self.local_vars_configuration.client_side_validation and
                not set(features).issubset(set(allowed_values))):  # noqa: E501
            raise ValueError(
                "Invalid values for `features` [{0}], must be a subset of [{1}]"  # noqa: E501
                .format(", ".join(map(str, set(features) - set(allowed_values))),  # noqa: E501
                        ", ".join(map(str, allowed_values)))
            )
        self._features = features

    @property
    def coupon_settings(self):
        """The coupon_settings of this UpdateCampaign.

        :rtype: CodeGeneratorSettings
        """
        return self._coupon_settings

    @coupon_settings.setter
    def coupon_settings(self, coupon_settings):
        """Sets the coupon_settings of this UpdateCampaign."""
        self._coupon_settings = coupon_settings

    @property
    def referral_settings(self):
        """The referral_settings of this UpdateCampaign.

        :rtype: CodeGeneratorSettings
        """
        return self._referral_settings

    @referral_settings.setter
    def referral_settings(self, referral_settings):
        """Sets the referral_settings of this UpdateCampaign."""
        self._referral_settings = referral_settings

    @property
    def limits(self):
        """The set of limits that will operate for this campaign.

        :rtype: list[LimitConfig]
        """
        return self._limits

    @limits.setter
    def limits(self, limits):
        """Sets the limits of this UpdateCampaign (required)."""
        if self.local_vars_configuration.client_side_validation and limits is None:  # noqa: E501
            raise ValueError("Invalid value for `limits`, must not be `None`")  # noqa: E501
        self._limits = limits

    @property
    def campaign_groups(self):
        """The IDs of the campaign groups that own this entity.

        :rtype: list[int]
        """
        return self._campaign_groups

    @campaign_groups.setter
    def campaign_groups(self, campaign_groups):
        """Sets the campaign_groups of this UpdateCampaign."""
        self._campaign_groups = campaign_groups

    def to_dict(self):
        """Returns the model properties as a dict, recursing into nested
        models (anything with a to_dict method) inside lists and dicts."""
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict") else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                                for k, v in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal."""
        if not isinstance(other, UpdateCampaign):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        if not isinstance(other, UpdateCampaign):
            return True
        return self.to_dict() != other.to_dict()
| 33.447479
| 647
| 0.62025
|
794c7e79fe2ec69b920fbd038f346532e687b1b5
| 3,119
|
py
|
Python
|
src/wi/urls/user/farm.py
|
cc1-cloud/cc1
|
8113673fa13b6fe195cea99dedab9616aeca3ae8
|
[
"Apache-2.0"
] | 11
|
2015-05-06T14:16:54.000Z
|
2022-02-08T23:21:31.000Z
|
src/wi/urls/user/farm.py
|
fortress-shell/cc1
|
8113673fa13b6fe195cea99dedab9616aeca3ae8
|
[
"Apache-2.0"
] | 1
|
2015-10-30T21:08:11.000Z
|
2015-10-30T21:08:11.000Z
|
src/wi/urls/user/farm.py
|
fortress-shell/cc1
|
8113673fa13b6fe195cea99dedab9616aeca3ae8
|
[
"Apache-2.0"
] | 5
|
2016-02-12T22:01:38.000Z
|
2021-12-06T16:56:54.000Z
|
# -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2010-2014] Institute of Nuclear Physics PAN, Krakow, Poland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @COPYRIGHT_end
"""@package src.wi.urls.user.farm
@author Piotr Wójcik
@date 14.11.2011
"""
from django.conf.urls import patterns, url, include
from django.utils.translation import ugettext_lazy as _
from wi.forms.farm import CreateFarmForm1, CreateFarmForm2, CreateFarmForm3, CreateFarmForm4
from wi.forms.vm import EditVMForm
from wi.utils.decorators import user_permission
from wi.utils.views import direct_to_template, simple_generic_id, form_generic_id
from wi.views.user.farm import CreateFarmWizard
# URL routes for the user-facing farm pages and their AJAX endpoints.
# All view names given as plain strings are resolved against the
# 'wi.views.user.farm' prefix passed to patterns().
farm_patterns = patterns(
    'wi.views.user.farm',

    # Static template views.
    url(r'^$', user_permission(direct_to_template),
        {'template_name': 'farms/base.html'}, name='far_farms'),
    url(r'^potato/$', user_permission(direct_to_template),
        {'template_name': 'farms/potato.html'}, name='far_potato'),

    # Multi-step farm creation wizard.
    url(r'^create_farm/$',
        CreateFarmWizard.as_view([CreateFarmForm1, CreateFarmForm2, CreateFarmForm3, CreateFarmForm4]),
        name='far_create_farm'),

    url(r'^show_farm/$', user_permission(direct_to_template),
        {'template_name': 'farms/show_farm.html'}, name='far_show_farm'),

    url(r'^ajax/get_table/$', 'far_ajax_get_table', name='far_ajax_get_table'),

    # Confirmation dialog + destroy request for a single farm.
    url(r'^ajax/destroy_farm/(?P<id1>\d+)/$', user_permission(simple_generic_id),
        {
            'template_name': 'generic/simple.html',
            'success_msg': (lambda desc: _('You have successfully destroyed farm <b>%(desc)s</b>.') % {'desc': desc}),
            'ask_msg': (lambda desc: _('Do you really want to destroy farm <b>%(desc)s</b>?') % {'desc': desc}),
            'request_url': 'user/farm/destroy/',
            'id_key': 'farm_id',
        },
        name='far_ajax_destroy_farm'),

    # Form dialog to save the farm head image and shut the farm down.
    url(r'^ajax/save_and_shutdown_farm/(?P<id1>\d+)/$', user_permission(form_generic_id),
        {
            'template_name': 'generic/form.html',
            'success_msg': (lambda desc, data: _('Farm head will be saved.') % {'desc': desc}),
            'ask_msg': (lambda desc: _('The farm will be closed. Enter a name to save head of this farm.') % {'desc': desc}),
            'confirmation': _('Save and shutdown'),
            'request_url_post': 'user/farm/save_and_shutdown/',
            'request_url_get': 'user/farm/get_by_id/',
            'form_class': EditVMForm,
            'id_key': 'farm_id',
        },
        name='far_ajax_save_and_shutdown'),
)
# Mount every farm route under the /farm/ prefix.
urlpatterns = patterns(
    '',
    url(r'^farm/', include(farm_patterns)),
)
| 44.557143
| 135
| 0.672331
|
794c7fb34ccecd56e5b44cd54ea057d7663c1ba2
| 1,429
|
py
|
Python
|
user.py
|
3xistentialcrisis/Password
|
dda6343302d7048c90c34c36a0b1a3e240cd95de
|
[
"MIT"
] | null | null | null |
user.py
|
3xistentialcrisis/Password
|
dda6343302d7048c90c34c36a0b1a3e240cd95de
|
[
"MIT"
] | null | null | null |
user.py
|
3xistentialcrisis/Password
|
dda6343302d7048c90c34c36a0b1a3e240cd95de
|
[
"MIT"
] | null | null | null |
from credentials import Credential
#User Class
class User:
    """Represents a single password-locker user account.

    Accounts are kept in the class-level ``user_details`` list, which acts
    as an in-memory registry shared by all instances.
    """

    # Shared registry of every saved User instance.
    user_details = []

    def __init__(self, fname, username, password):
        """Create a new user account.

        Args:
            fname: the user's real first name.
            username: the username associated with the account.
            password: the user's password.
        """
        self.fname = fname
        self.username = username
        self.password = password

    def save_user(self):
        """Append this user to the shared user_details registry."""
        User.user_details.append(self)

    @classmethod
    def display_users(cls):
        """Return the list of all saved users."""
        return cls.user_details

    @classmethod
    def log_in(cls, username, password):
        """Authenticate against the saved users.

        Args:
            username: the password-locker username to check.
            password: the password to check.

        Returns:
            Credential.credential_details on a match, False otherwise.
        """
        # Loop variable renamed from `User`: the original shadowed the
        # class name inside this method.
        for user in cls.user_details:
            if user.username == username and user.password == password:
                return Credential.credential_details
        return False
| 26.462963
| 85
| 0.601819
|
794c7fd617a64c75674c71b5eb7ef6f50a2c1058
| 806
|
py
|
Python
|
flask_file_system/errors.py
|
quaxsze/flask-file-system
|
5ab2cb5c4b5f2b91b53153574d035a924eb6d74c
|
[
"MIT"
] | null | null | null |
flask_file_system/errors.py
|
quaxsze/flask-file-system
|
5ab2cb5c4b5f2b91b53153574d035a924eb6d74c
|
[
"MIT"
] | null | null | null |
flask_file_system/errors.py
|
quaxsze/flask-file-system
|
5ab2cb5c4b5f2b91b53153574d035a924eb6d74c
|
[
"MIT"
] | null | null | null |
# Public exception API of this module; one entry per class defined below.
__all__ = (
    'FSError',
    'FileExists',
    'FileNotFound',
    'UnauthorizedFileType',
    'UploadNotAllowed',
    'OperationNotSupported',
)
class FSError(Exception):
    """Base class for all Flask-FS exceptions."""
    # Redundant `pass` removed: the docstring already forms the class body.
class UnauthorizedFileType(FSError):
    """Raised when trying to upload an unauthorized file type."""
    # Redundant `pass` removed: the docstring already forms the class body.
class UploadNotAllowed(FSError):
    """Raised when trying to upload into storage where upload is not allowed."""
    # Redundant `pass` removed: the docstring already forms the class body.
class FileExists(FSError):
    """Raised when trying to overwrite an existing file."""
    # Redundant `pass` removed: the docstring already forms the class body.
class FileNotFound(FSError):
    """Raised when trying to access a non existing file."""
    # Redundant `pass` removed: the docstring already forms the class body.
class OperationNotSupported(FSError):
    """Raised when trying to perform an unsupported operation by the current backend."""
    # Redundant `pass` removed: the docstring already forms the class body.
| 20.666667
| 87
| 0.694789
|
794c80216371ce8b19b37f387fddf34ec38fdc1f
| 2,852
|
py
|
Python
|
mimesis/data/int/business.py
|
aprasanna/mimesis
|
71b89fb36d6bf6d5b52b6719138d50a6565c0c75
|
[
"MIT"
] | 1
|
2019-03-24T05:00:14.000Z
|
2019-03-24T05:00:14.000Z
|
mimesis/data/int/business.py
|
aprasanna/mimesis
|
71b89fb36d6bf6d5b52b6719138d50a6565c0c75
|
[
"MIT"
] | null | null | null |
mimesis/data/int/business.py
|
aprasanna/mimesis
|
71b89fb36d6bf6d5b52b6719138d50a6565c0c75
|
[
"MIT"
] | null | null | null |
"""Provides all the generic data related to the business."""
# ISO 4217 currency codes.  Content and ordering are unchanged from the
# original one-per-line listing (ordering matters for seeded random picks).
CURRENCY_ISO_CODES = [
    'AED', 'AFN', 'ALL', 'AMD', 'ANG', 'AOA', 'ARS', 'AUD', 'AWG', 'AZN',
    'BAM', 'BBD', 'BDT', 'BGN', 'BHD', 'BIF', 'BMD', 'BND', 'BOB', 'BOV',
    'BRL', 'BSD', 'BTN', 'BWP', 'BYN', 'BYR', 'BZD', 'CAD', 'CDF', 'CHE',
    'CHF', 'CHW', 'CLF', 'CLP', 'CNY', 'COP', 'COU', 'CRC', 'CUC', 'CUP',
    'CVE', 'CZK', 'DJF', 'DKK', 'DOP', 'DZD', 'EGP', 'ERN', 'ETB', 'EUR',
    'FJD', 'FKP', 'GBP', 'GEL', 'GHS', 'GIP', 'GMD', 'GNF', 'GTQ', 'GYD',
    'HKD', 'HNL', 'HRK', 'HTG', 'HUF', 'IDR', 'ILS', 'INR', 'IQD', 'IRR',
    'ISK', 'JMD', 'JOD', 'JPY', 'KES', 'KGS', 'KHR', 'KMF', 'KPW', 'KRW',
    'KWD', 'KYD', 'KZT', 'LAK', 'LBP', 'LKR', 'LRD', 'LSL', 'LYD', 'MAD',
    'MDL', 'MGA', 'MKD', 'MMK', 'MNT', 'MOP', 'MRO', 'MUR', 'MVR', 'MWK',
    'MXN', 'MXV', 'MYR', 'MZN', 'NAD', 'NGN', 'NIO', 'NOK', 'NPR', 'NZD',
    'OMR', 'PAB', 'PEN', 'PGK', 'PHP', 'PKR', 'PLN', 'PYG', 'QAR', 'RON',
    'RSD', 'RUB', 'RWF', 'SAR', 'SBD', 'SCR', 'SDG', 'SEK', 'SGD', 'SHP',
    'SLL', 'SOS', 'SRD', 'SSP', 'STD', 'SVC', 'SYP', 'SZL', 'THB', 'TJS',
    'TMT', 'TND', 'TOP', 'TRY', 'TTD', 'TWD', 'TZS', 'UAH', 'UGX', 'USD',
    'USN', 'UYI', 'UYU', 'UZS', 'VEF', 'VND', 'VUV', 'WST', 'XAF', 'XAG',
    'XAU', 'XBA', 'XBB', 'XBC', 'XBD', 'XCD', 'XDR', 'XOF', 'XPD', 'XPF',
    'XPT', 'XSU', 'XTS', 'XUA', 'XXX', 'YER', 'ZAR', 'ZMW', 'ZWL',
]
# Cryptocurrency ticker codes (ordering unchanged from the original).
CRYPTOCURRENCY_ISO_CODES = [
    'DASH', 'ETH', 'IOT', 'VTC', 'XBC', 'XBT',
    'BTC', 'XLM', 'XMR', 'XRP', 'XZC', 'ZEC',
]
# Locale code -> currency symbol.  The 'default' entry presumably serves
# as the fallback symbol for unknown locales -- confirm against callers.
# Entries and insertion order are unchanged from the original.
CURRENCY_SYMBOLS = {
    'cs': 'Kč', 'da': 'kr', 'de': '€', 'de-at': '€', 'de-ch': 'Fr.',
    'el': '€', 'en': '$', 'en-ca': '$', 'en-gb': '£', 'en-au': '$',
    'es': '€', 'es-mx': '$', 'et': '€', 'fa': '﷼', 'fi': '€',
    'fr': '€', 'hu': 'Ft', 'is': 'kr', 'it': '€', 'ja': '¥',
    'kk': '₸', 'ko': '₩', 'nl': '€', 'nl-be': '€', 'no': 'kr',
    'pl': 'zł', 'pt': '€', 'pt-br': 'R$', 'ru': '₽', 'sv': 'kr',
    'tr': '₺', 'uk': '₴', 'zh': '¥',
    'default': '$',
}
# Glyphs used as cryptocurrency symbols.
CRYPTOCURRENCY_SYMBOLS = ['Ƀ', 'Ł', 'Ξ']
| 11.785124
| 60
| 0.28331
|
794c8034f0fab32c508404814902c1f68da5ba99
| 10,042
|
py
|
Python
|
onir/predictors/reranker.py
|
tgeral68/OpenNIR
|
225b26185bd67fdc00f24de3ef70d35768e22243
|
[
"MIT"
] | 3
|
2021-01-07T15:44:38.000Z
|
2021-08-23T03:44:47.000Z
|
onir/predictors/reranker.py
|
tgeral68/OpenNIR
|
225b26185bd67fdc00f24de3ef70d35768e22243
|
[
"MIT"
] | null | null | null |
onir/predictors/reranker.py
|
tgeral68/OpenNIR
|
225b26185bd67fdc00f24de3ef70d35768e22243
|
[
"MIT"
] | 1
|
2021-11-16T09:10:47.000Z
|
2021-11-16T09:10:47.000Z
|
import os
import json
import torch
import onir
from onir import util, spec, predictors, datasets
from onir.interfaces import trec, plaintext
@predictors.register('reranker')
class Reranker(predictors.BasePredictor):
    """Predictor that scores (query, document) candidate pairs with a ranker.

    Candidates are drawn from the dataset's run file or its qrels, selected
    by the 'source' config key; scores are yielded per pair and consumed by
    PredictorContext to build run files and compute metrics.
    """
    name = None

    @staticmethod
    def default_config():
        # Prediction defaults.  'run_threshold' > 0 caps how many run entries
        # per query are scored; 'preload' keeps all eval batches in memory;
        # 'measures' is a comma-separated list of metric names.
        return {
            'batch_size': 64,
            'gpu': True,
            'gpu_determ': True,
            'preload': False,
            'run_threshold': 0,
            'measures': 'map,ndcg,p@20,ndcg@20,mrr',
            'source': 'run'
        }

    def __init__(self, config, ranker, trainer, dataset, vocab, logger, random):
        """Store collaborators and cache the ranker's input field spec."""
        self.config = config
        self.ranker = ranker
        self.trainer = trainer
        self.dataset = dataset
        self.logger = logger
        self.vocab = vocab
        self.random = random
        # Field spec the ranker expects for each input batch.
        self.input_spec = ranker.input_spec()

    def _iter_batches(self, device):
        """Yield evaluation batches of up to config['batch_size'] records."""
        # Always carry the ids alongside whatever fields the ranker needs.
        fields = set(self.input_spec['fields']) | {'query_id', 'doc_id'}
        it = datasets.record_iter(self.dataset,
                                  fields=fields,
                                  source=self.config['source'],
                                  run_threshold=self.config['run_threshold'],
                                  minrel=None,
                                  shuf=False,
                                  random=self.random,
                                  inf=False)
        for batch_items in util.chunked(it, self.config['batch_size']):
            # Transpose per-record dicts into one list per field.
            batch = {}
            for record in batch_items:
                for k, seq in record.items():
                    batch.setdefault(k, []).append(seq)
            # apply_spec_batch presumably converts the field lists into
            # device tensors per the input spec -- project helper, confirm.
            batch = spec.apply_spec_batch(batch, self.input_spec, device)
            yield batch

    def _preload_batches(self, device):
        """Materialize all eval batches once, then yield the same list
        forever (used when config['preload'] is enabled)."""
        with self.logger.duration('loading evaluation data'):
            batches = list(self.logger.pbar(self._iter_batches(device), desc='preloading eval data (batches)'))
        while True:
            yield batches

    def _reload_batches(self, device):
        """Yield a fresh batch iterator on every request (no caching)."""
        while True:
            it = self._iter_batches(device)
            yield it

    def pred_ctxt(self):
        """Build a PredictorContext bound to the configured device and the
        appropriate (preloaded or re-loading) batch source."""
        device = util.device(self.config, self.logger)
        if self.config['preload']:
            datasource = self._preload_batches(device)
        else:
            datasource = self._reload_batches(device)
        return PredictorContext(self, datasource, device)

    def iter_scores(self, ranker, datasource, device):
        """Yield (query_id, doc_id, score) triples for every candidate pair.

        Two shortcuts exist for the 'trivial' ranker: with neg/qsum/max all
        unset it echoes the run's original scores; with only max set it
        yields the qrel label (-1 when unjudged) as the score.
        """
        if ranker.name == 'trivial' and not ranker.config['neg'] and not ranker.config['qsum'] and not ranker.config['max']:
            for qid, values in self.dataset.run().items():
                for did, score in values.items():
                    yield qid, did, score
            return
        if ranker.name == 'trivial' and not ranker.config['neg'] and not ranker.config['qsum'] and ranker.config['max']:
            qrels = self.dataset.qrels()
            for qid, values in self.dataset.run().items():
                q_qrels = qrels.get(qid, {})
                for did in values:
                    yield qid, did, q_qrels.get(did, -1)
            return
        with torch.no_grad():
            ranker.eval()
            ds = next(datasource, None)
            # Estimate the total pair count for the progress bar.
            total = None
            if isinstance(ds, list):
                total = sum(len(d['query_id']) for d in ds)
            elif self.config['source'] == 'run':
                if self.config['run_threshold'] > 0:
                    total = sum(min(len(v), self.config['run_threshold']) for v in self.dataset.run().values())
                else:
                    total = sum(len(v) for v in self.dataset.run().values())
            elif self.config['source'] == 'qrels':
                total = sum(len(v) for v in self.dataset.qrels().values())
            with self.logger.pbar_raw(total=total, desc='pred', quiet=True) as pbar:
                for batch in util.background(ds):
                    batch = {k: (v.to(device) if torch.is_tensor(v) else v) for k, v in batch.items()}
                    # NOTE(review): scores come from self.ranker, not the
                    # `ranker` argument that was just put in eval mode above;
                    # looks inconsistent -- confirm which one is intended.
                    rel_scores = self.ranker(**batch).cpu()
                    if len(rel_scores.shape) == 2:
                        # Keep only the first output column when the ranker
                        # returns a 2-D score tensor.
                        rel_scores = rel_scores[:, 0]
                    triples = list(zip(batch['query_id'], batch['doc_id'], rel_scores))
                    for qid, did, score in triples:
                        yield qid, did, score.item()
                    pbar.update(len(batch['query_id']))

    def rerank_dict(self, ranker, device):
        """Score every candidate and return {query_id: {doc_id: score}}."""
        datasource = self._reload_batches(device)
        result = {}
        for qid, did, score in self.iter_scores(ranker, datasource, device):
            result.setdefault(qid, {})[did] = score
        return result
class PredictorContext:
    """Callable that performs one evaluation pass for an epoch: it produces
    (or reuses) a run file on disk, then computes and caches the configured
    metrics for it."""

    def __init__(self, pred, datasource, device):
        self.pred = pred              # owning Reranker predictor
        self.datasource = datasource  # batch source from pred_ctxt()
        self.device = device

    def __call__(self, ctxt):
        """Run evaluation for ctxt['epoch'] and return a result dict with
        the run, its path, the metrics, and whether everything was cached."""
        cached = True
        epoch = ctxt['epoch']
        base_path = os.path.join(ctxt['base_path'], self.pred.dataset.path_segment())
        # Keep thresholded runs separate from full runs on disk.
        if self.pred.config['source'] == 'run' and self.pred.config['run_threshold'] > 0:
            base_path = '{p}_runthreshold-{run_threshold}'.format(p=base_path, **self.pred.config)
        os.makedirs(os.path.join(base_path, 'runs'), exist_ok=True)
        with open(os.path.join(base_path, 'config.json'), 'wt') as f:
            json.dump(self.pred.dataset.config, f)
        run_path = os.path.join(base_path, 'runs', f'{epoch}.run')
        if os.path.exists(run_path):
            # A run for this epoch already exists; reuse it.
            run = trec.read_run_dict(run_path)
        else:
            # Original run scores are needed to rank docs cut by the threshold.
            if self.pred.config['source'] == 'run' and self.pred.config['run_threshold'] > 0:
                official_run = self.pred.dataset.run('dict')
            else:
                official_run = {}
            run = {}
            ranker = ctxt['ranker']().to(self.device)
            this_qid = None
            these_docs = {}
            # Stream scores grouped by query id; flush each query's docs to
            # the run file whenever the query id changes.
            with util.finialized_file(run_path, 'wt') as f:
                for qid, did, score in self.pred.iter_scores(ranker, self.datasource, self.device):
                    if qid != this_qid:
                        if this_qid is not None:
                            these_docs = self._apply_threshold(these_docs, official_run.get(this_qid, {}))
                            trec.write_run_dict(f, {this_qid: these_docs})
                        this_qid = qid
                        these_docs = {}
                    these_docs[did] = score
                # Flush the final query's documents.
                if this_qid is not None:
                    these_docs = self._apply_threshold(these_docs, official_run.get(this_qid, {}))
                    trec.write_run_dict(f, {this_qid: these_docs})
            cached = False
        result = {
            'epoch': epoch,
            'run': run,
            'run_path': run_path,
            'base_path': base_path,
            'cached': cached
        }
        result['metrics'] = {m: None for m in self.pred.config['measures'].split(',') if m}
        result['metrics_by_query'] = {m: None for m in result['metrics']}
        # Fill in metrics cached on disk; compute only what is still missing.
        missing_metrics = self.load_metrics(result)
        if missing_metrics:
            measures = set(missing_metrics)
            result['cached'] = False
            qrels = self.pred.dataset.qrels()
            calculated_metrics = onir.metrics.calc(qrels, run_path, measures)
            result['metrics_by_query'].update(calculated_metrics)
            result['metrics'].update(onir.metrics.mean(calculated_metrics))
            self.write_missing_metrics(result, missing_metrics)
        try:
            if ctxt['ranker']().config.get('add_runscore'):
                result['metrics']['runscore_alpha'] = torch.sigmoid(ctxt['ranker']().runscore_alpha).item()
                rs_alpha_f = os.path.join(ctxt['base_path'], 'runscore_alpha.txt')
                # NOTE(review): write_tsv is passed the path (rs_alpha_f)
                # rather than the freshly opened handle `f` -- confirm
                # whether the handle was intended here.
                with open(rs_alpha_f, 'at') as f:
                    plaintext.write_tsv(rs_alpha_f, [(str(epoch), str(result['metrics']['runscore_alpha']))])
        except FileNotFoundError:
            pass  # model may no longer exist, ignore
        return result

    def load_metrics(self, ctxt):
        """Populate ctxt metrics from cached files; return the set of metric
        names that still need to be computed."""
        missing = set()
        epoch = ctxt['epoch']
        for metric in list(ctxt['metrics']):
            path_agg = os.path.join(ctxt['base_path'], metric, 'agg.txt')
            path_epoch = os.path.join(ctxt['base_path'], metric, f'{epoch}.txt')
            if os.path.exists(path_agg) and os.path.exists(path_epoch):
                # agg.txt holds one (epoch, value) row per epoch; take ours.
                ctxt['metrics'][metric] = [float(v) for k, v in plaintext.read_tsv(path_agg) if int(k) == epoch][0]
                ctxt['metrics_by_query'][metric] = {k: float(v) for k, v in plaintext.read_tsv(path_epoch)}
            else:
                missing.add(metric)
        return missing

    def write_missing_metrics(self, ctxt, missing_metrics):
        """Append newly computed metric values to their on-disk caches."""
        epoch = ctxt['epoch']
        for metric in missing_metrics:
            os.makedirs(os.path.join(ctxt['base_path'], metric), exist_ok=True)
            path_agg = os.path.join(ctxt['base_path'], metric, 'agg.txt')
            path_epoch = os.path.join(ctxt['base_path'], metric, f'{epoch}.txt')
            with open(path_agg, 'at') as f:
                plaintext.write_tsv(f, [(str(epoch), str(ctxt['metrics'][metric]))])
            plaintext.write_tsv(path_epoch, ctxt['metrics_by_query'][metric].items())

    def _apply_threshold(self, these_docs, original_scores):
        """Re-add run docs cut by the threshold, assigning them strictly
        decreasing scores below the current minimum so their original
        relative ordering is preserved at the bottom of the ranking."""
        min_score = min(these_docs.values())
        missing_docs = original_scores.keys() - these_docs.keys()
        for i, did in enumerate(sorted(missing_docs, key=lambda did: original_scores[did], reverse=True)):
            these_docs[did] = min_score - i - 1
        return these_docs
| 43.284483
| 124
| 0.553973
|
794c80a118d8313bfb5ac93e085bfcd3c20c80f5
| 9,600
|
py
|
Python
|
lib/visualization_lib.py
|
Diesmaster/komodo-cctools-python
|
c7345a3e0efc9bf42b31deb61c5a9be64b43d36c
|
[
"MIT"
] | null | null | null |
lib/visualization_lib.py
|
Diesmaster/komodo-cctools-python
|
c7345a3e0efc9bf42b31deb61c5a9be64b43d36c
|
[
"MIT"
] | null | null | null |
lib/visualization_lib.py
|
Diesmaster/komodo-cctools-python
|
c7345a3e0efc9bf42b31deb61c5a9be64b43d36c
|
[
"MIT"
] | null | null | null |
import csv
from datetime import datetime
import sys
from lib import tuilib
def create_prices_csv(rpc_connection, depth):
    """Dump per-pair price history from the prices CC into prices.csv.

    Each row is: date (UTC, minute resolution), the three price values
    reported for that sample, and the pair name.

    :param rpc_connection: proxy exposing the ``prices`` RPC call
    :param depth: number of samples to request (passed through to the RPC)
    """
    prices_json = rpc_connection.prices(depth)
    dates = [datetime.utcfromtimestamp(ts).strftime('%Y-%m-%dT%H:%M')
             for ts in prices_json["timestamps"]]
    prices_rows = []
    for pair in prices_json["pricefeeds"]:
        # Samples are ordered the same way as the "timestamps" array, so
        # zip pairs each sample with its date (was a manual counter).
        for date, price in zip(dates, pair["prices"]):
            prices_rows.append([date, price[0], price[1], price[2],
                                pair["name"]])
    # newline='' is required by the csv module to avoid blank lines on
    # Windows; the redundant f.close() inside the with-block was dropped.
    with open('prices.csv', 'w', newline='') as f:
        filewriter = csv.writer(f, delimiter=',',
                                quotechar='|', quoting=csv.QUOTE_MINIMAL)
        filewriter.writerow(["date", "price1", "price2", "price3", "pair"])
        filewriter.writerows(prices_rows)
def create_delayed_prices_csv(rpc_connection, depth):
    """Dump price history shifted back one day into delayed_prices.csv.

    Identical to create_prices_csv except every timestamp is moved back
    86400 seconds (24h) before formatting, which is how the prices CC
    exposes its delayed feed.

    :param rpc_connection: proxy exposing the ``prices`` RPC call
    :param depth: number of samples to request (passed through to the RPC)
    """
    prices_json = rpc_connection.prices(depth)
    dates = [datetime.utcfromtimestamp(ts - 86400).strftime('%Y-%m-%dT%H:%M')
             for ts in prices_json["timestamps"]]
    prices_rows = []
    for pair in prices_json["pricefeeds"]:
        # zip replaces the manual counter; samples align with timestamps
        for date, price in zip(dates, pair["prices"]):
            prices_rows.append([date, price[0], price[1], price[2],
                                pair["name"]])
    # newline='' avoids blank lines on Windows per the csv module docs
    with open('delayed_prices.csv', 'w', newline='') as f:
        filewriter = csv.writer(f, delimiter=',',
                                quotechar='|', quoting=csv.QUOTE_MINIMAL)
        filewriter.writerow(["date", "price1", "price2", "price3", "pair"])
        filewriter.writerows(prices_rows)
def get_pairs_names(rpc_connection):
    """Return the names of all price pairs known to the prices CC."""
    feeds = rpc_connection.prices("1")["pricefeeds"]
    return [feed["name"] for feed in feeds]
# opened bets
def create_csv_with_bets(rpc_connection, open_or_closed):
    """Dump the wallet's prices-CC bets into a CSV file.

    Writes betlist.csv for open bets or betlist_history.csv for closed
    ones, one row per bet transaction.

    :param rpc_connection: proxy exposing mypriceslist / pricesinfo RPCs
    :param open_or_closed: 'open' or 'closed'
    :raises ValueError: if open_or_closed is neither value
    """
    if open_or_closed == 'open':
        filename = 'betlist.csv'
    elif open_or_closed == 'closed':
        filename = 'betlist_history.csv'
    else:
        # previously an unknown mode fell through to a NameError on
        # `filename`; fail loudly and early instead
        raise ValueError("open_or_closed must be 'open' or 'closed'")
    priceslist = rpc_connection.mypriceslist(open_or_closed)
    bets_rows = []
    for price in priceslist:
        # known bad transaction that the original code also skipped
        if price == "48194bab8d377a7fa0e62d5e908474dae906675395753f09969d4c4bea4a7518":
            continue
        pricesinfo = rpc_connection.pricesinfo(price)
        # flatten "A,B,C" into "ABC", matching the original expression column
        adopted_expression = ''.join(pricesinfo["expression"].split(","))
        bets_rows.append([
            price,
            pricesinfo["rekt"],
            adopted_expression,
            pricesinfo["leverage"],
            pricesinfo["TotalPositionSize"],
            pricesinfo["TotalProfits"],
            pricesinfo["equity"],
            pricesinfo["LastPrice"],
            pricesinfo["LastHeight"],
        ])
    # newline='' per csv docs; the original's bare `f.close` (no call) was
    # a no-op bug and is gone — the with-block closes the file.
    with open(filename, 'w', newline='') as f:
        filewriter = csv.writer(f, delimiter=',',
                                quotechar='|', quoting=csv.QUOTE_MINIMAL)
        filewriter.writerow(["txid", "is rekt", "expression", "leverage",
                             "TotalPositionSize", "TotalProfits", "equity",
                             "LastPrice", "LastHeight"])
        filewriter.writerows(bets_rows)
# function checking if prices for pair or inversed pair availiable on chain
def is_pair_availiable(rpc_connection, pair):
    """Check whether a pair (or its inverse) is served by the prices CC.

    Returns a (found, reversed) tuple: found is True when either
    orientation of the pair is known on chain; reversed is True when only
    the inverted pair (QUOTE_BASE) is known.
    """
    known_names = [feed["name"]
                   for feed in rpc_connection.prices("1")["pricefeeds"]]
    parts = pair.split("_")
    try:
        # indexing raises when the name has no underscore at all
        inverted = parts[1] + "_" + parts[0]
    except Exception:
        return False, False
    if pair in known_names:
        return True, False
    if inverted in known_names:
        return True, True
    return False, False
# function returning list with prices for pair name if it presist in list
# with inverted price if it inverted price, and with error if no such pair in prices call output
def return_prices_for_pair(rpc_connection, pair, depth):
    """Return (prices, timestamps) for a pair name.

    prices is a flat list: the three values of each sample concatenated in
    sample order. When only the reversed pair exists on chain, each value
    is inverted (1/x). When the pair is unknown in either orientation, a
    warning is printed and the function falls through, implicitly
    returning None (same behaviour as before).
    """
    prices_json = rpc_connection.prices(depth)
    timestamps = prices_json["timestamps"]
    found, inverted = is_pair_availiable(rpc_connection, pair)
    if not found:
        print("Can't get price for this pair. Aborting.")
    if found and not inverted:
        for feed in prices_json["pricefeeds"]:
            if feed["name"] == pair:
                flat = []
                for sample in feed["prices"]:
                    flat.extend(sample)
                return flat, timestamps
    if found and inverted:
        parts = pair.split("_")
        reversed_name = parts[1] + "_" + parts[0]
        for feed in prices_json["pricefeeds"]:
            if feed["name"] == reversed_name:
                flat = []
                for sample in feed["prices"]:
                    flat.extend(1 / value for value in sample)
                return flat, timestamps
# function returning list with stacks lists
def split_synthetic_on_stacks(rpc_connection, synthetic, depth):
    """Split a synthetic expression into operator-terminated chunks.

    Each returned element is the substring up to and including the next
    '*' or '/'; any trailing text after the last operator is discarded.
    (rpc_connection and depth are unused but kept for interface parity.)
    """
    stacks = []
    chunk_start = 0
    for idx, ch in enumerate(synthetic):
        if ch in ('*', '/'):
            stacks.append(synthetic[chunk_start:idx + 1])
            chunk_start = idx + 1
    return stacks
def count_stack(rpc_connection, stack, depth):
    """Evaluate one stack of pairs into a combined price series.

    stack layout: [pair1, pair2, op, ...] (length 4, two pairs) or
    [pair1, pair2, pair3, op, ...] (length 5, three pairs), where op is
    '*' or '/'. Returns the element-wise combined price list, or the
    string 'Incorrect stack!' for unsupported lengths.
    """
    if len(stack) == 4:
        # two-pair stack: combine series element-wise with the operator
        series_a = return_prices_for_pair(rpc_connection, stack[0], depth)
        series_b = return_prices_for_pair(rpc_connection, stack[1], depth)
        if stack[2] == "/":
            stack_prices = [float(series_a[0][i]) / float(series_b[0][i])
                            for i in range(len(series_a[0]))]
        elif stack[2] == "*":
            stack_prices = [float(series_a[0][i]) * float(series_b[0][i])
                            for i in range(len(series_a[0]))]
    elif len(stack) == 5:
        # three-pair stack
        series_a = return_prices_for_pair(rpc_connection, stack[0], depth)
        series_b = return_prices_for_pair(rpc_connection, stack[1], depth)
        series_c = return_prices_for_pair(rpc_connection, stack[2], depth)
        if stack[3] == "/":
            stack_prices = [float(series_a[0][i]) / float(series_b[0][i])
                            / float(series_c[0][i])
                            for i in range(len(series_a[0]))]
        elif stack[3] == "*":
            stack_prices = [float(series_a[0][i]) * float(series_b[0][i])
                            * float(series_c[0][i])
                            for i in range(len(series_a[0]))]
    else:
        return "Incorrect stack!"
    print(stack)
    # NOTE(review): an unrecognised operator leaves stack_prices unbound
    # and this line raises NameError, exactly as the original did.
    return stack_prices
def make_csv_for_stack(rpc_connection, stack, stack_name, depth):
    """Evaluate a stack and write its price series to a user-graph CSV.

    The output file is <sys.path[0]>/usergraphs/<joined stack>_user with
    one row per timestamp: date, the three combined price values, and the
    concatenated stack expression. (stack_name is currently unused.)
    """
    stack_prices = count_stack(rpc_connection, stack, depth)
    timestamps = rpc_connection.prices(depth)["timestamps"]
    dates = [datetime.utcfromtimestamp(ts).strftime('%Y-%m-%dT%H:%M')
             for ts in timestamps]
    pair_name = ''.join(stack)
    prices_rows = []
    # stack_prices is a flat list: three values per timestamp
    for row_idx, i in enumerate(range(0, len(stack_prices), 3)):
        prices_rows.append([dates[row_idx],
                            stack_prices[i],
                            stack_prices[i + 1],
                            stack_prices[i + 2],
                            pair_name])
    with open(sys.path[0] + '/usergraphs/' + pair_name + '_user', 'w') as f:
        filewriter = csv.writer(f, delimiter=',',
                                quotechar='|', quoting=csv.QUOTE_MINIMAL)
        filewriter.writerow(["date", "price1", "price2", "price3", "pair"])
        filewriter.writerows(prices_rows)
def draw_a_graph():
    """Placeholder: graph rendering is not implemented yet."""
    pass
| 41.37931
| 149
| 0.615417
|
794c811bb00733aada27bf14e84fffbf194033f4
| 1,211
|
py
|
Python
|
python_practice/strings_part_one.py
|
WorldPolice/python_practice
|
aa18734961301895796f64ff7893493cd695c2c6
|
[
"Apache-2.0"
] | null | null | null |
python_practice/strings_part_one.py
|
WorldPolice/python_practice
|
aa18734961301895796f64ff7893493cd695c2c6
|
[
"Apache-2.0"
] | 2
|
2018-12-10T00:56:44.000Z
|
2019-01-21T21:52:21.000Z
|
python_practice/strings_part_one.py
|
WorldPolice/python_practice
|
aa18734961301895796f64ff7893493cd695c2c6
|
[
"Apache-2.0"
] | 3
|
2018-12-08T22:48:05.000Z
|
2018-12-11T01:19:15.000Z
|
# -*- coding: utf-8 -*-
def is_my_argument_a_string(maybe_string):
    """Report whether *maybe_string* is a string.

    Returns True for any ``str`` instance and False for every other type.
    """
    argument_is_str = isinstance(maybe_string, str)
    return argument_is_str
def concatonate_my_arguments(one, two, three):
    """Return the three string arguments joined end to end, in order."""
    return ''.join((one, two, three))
def all_numbers(maybe_string_with_numbers):
    """Return True when every character of the string is a digit.

    An empty string yields False, matching str.isdigit semantics.
    """
    return maybe_string_with_numbers.isdigit()
def substring_test(substring, string):
    """Case-insensitively test whether *substring* occurs in *string*.

    Both values are casefolded before the containment check, so e.g.
    substring_test('FOO', 'foobar') is True.
    """
    needle = substring.casefold()
    haystack = string.casefold()
    return needle in haystack
| 34.6
| 116
| 0.718415
|
794c811db6208d1fb27f44d8ea5b58127873f206
| 1,873
|
py
|
Python
|
setup.py
|
xingetouzi/Spirit
|
0d298f0b608c3f3a2dbbc152a8da042eef6d2e05
|
[
"MIT"
] | 1
|
2020-12-08T01:09:30.000Z
|
2020-12-08T01:09:30.000Z
|
setup.py
|
xingetouzi/Spirit
|
0d298f0b608c3f3a2dbbc152a8da042eef6d2e05
|
[
"MIT"
] | 7
|
2021-03-30T22:06:05.000Z
|
2021-03-30T22:06:08.000Z
|
setup.py
|
xingetouzi/Spirit
|
0d298f0b608c3f3a2dbbc152a8da042eef6d2e05
|
[
"MIT"
] | 6
|
2018-06-25T02:17:53.000Z
|
2020-12-08T01:09:32.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
import os
import io
from setuptools import setup, find_packages
# Directory containing this setup.py; used to resolve bundled data files.
BASE_DIR = os.path.join(os.path.dirname(__file__))
# Long description shown on PyPI comes straight from the README.
with io.open(os.path.join(BASE_DIR, 'README.md'), encoding='utf-8') as f:
    README = f.read()
# Single-source the version from the package itself.
VERSION = __import__('spirit').__version__
# Runtime dependencies are read verbatim from requirements.txt.
with io.open(os.path.join(BASE_DIR, 'requirements.txt'), encoding='utf-8') as fh:
    REQUIREMENTS = fh.read()
# python-magic ships prebuilt libmagic binaries only for Windows/macOS;
# on other platforms the plain package (system libmagic) is used.
if sys.platform.startswith(('win32', 'darwin')):
    PYTHON_MAGIC_DEP = ['python-magic-bin==0.4.14']
else: # Linux?
    PYTHON_MAGIC_DEP = ['python-magic==0.4.15']
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
    name='django-spirit',
    version=VERSION,
    description='Spirit is a Python based forum powered by Django.',
    author='Esteban Castro Borsani',
    author_email='ecastroborsani@gmail.com',
    long_description=README,
    url='http://spirit-project.com/',
    packages=find_packages(),
    test_suite="runtests.start",
    # console entry point: the `spirit` management command
    entry_points="""
    [console_scripts]
    spirit=spirit.extra.bin.spirit:main
    """,
    include_package_data=True,
    zip_safe=False,
    install_requires=REQUIREMENTS,
    # file-type detection support is opt-in: pip install django-spirit[files]
    extras_require={
        'files': PYTHON_MAGIC_DEP},
    license='MIT License',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    ],
)
| 29.265625
| 81
| 0.662573
|
794c813d8f2ef8bcbf083737f0e01548ceee3649
| 9,604
|
py
|
Python
|
ceilosca/tests/functional/api/v2/test_api_with_monasca_driver.py
|
b-com/ceilosca
|
fa130de7bb0fb0b5f481f455dd678071fb2a72c9
|
[
"Apache-2.0"
] | null | null | null |
ceilosca/tests/functional/api/v2/test_api_with_monasca_driver.py
|
b-com/ceilosca
|
fa130de7bb0fb0b5f481f455dd678071fb2a72c9
|
[
"Apache-2.0"
] | null | null | null |
ceilosca/tests/functional/api/v2/test_api_with_monasca_driver.py
|
b-com/ceilosca
|
fa130de7bb0fb0b5f481f455dd678071fb2a72c9
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2015 Hewlett Packard
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test api with Monasca driver
"""
import mock
import pkg_resources
from oslo_config import cfg
from oslo_config import fixture as fixture_config
from oslotest import mockpatch
from stevedore import driver
from stevedore import extension
from ceilometer import storage
from ceilometer.tests import base as test_base
from oslo_policy import opts
import pecan
import pecan.testing
OPT_GROUP_NAME = 'keystone_authtoken'
cfg.CONF.import_group(OPT_GROUP_NAME, "keystonemiddleware.auth_token")
class TestApi(test_base.BaseTestCase):
    """Base fixture: a Ceilometer v2 API test app wired to the Monasca
    storage driver, with the Monasca HTTP client mocked out."""
    # TODO(Unresolved comment from git review: Can we include CM-api test
    # cases for get_samples in
    # ceilometer/tests/api/v2/test_api_with_monasca_driver.py?)
    def _get_driver_from_entry_point(self, entry_point, namespace):
        # Build a stevedore DriverManager directly from an entry-point
        # string so the ceilosca driver loads without being installed.
        ep = pkg_resources.EntryPoint.parse(entry_point)
        a_driver = extension.Extension('con_driver', ep,
                                       ep.load(require=False), None)
        mgr = driver.DriverManager.make_test_instance(
            a_driver, namespace=namespace
        )
        mgr._init_plugins([a_driver])
        return mgr
    def get_connection_with_mock_driver_manager(self, url, namespace):
        # side_effect replacement for ceilometer.storage.get_connection
        mgr = self._get_driver_from_entry_point(
            entry_point='monasca = ceilosca.storage.impl_monasca:Connection',
            namespace='ceilometer.metering.storage')
        return mgr.driver(url)
    def get_publisher_with_mock_driver_manager(self, url, namespace):
        # side_effect replacement for ceilometer.publisher.get_publisher
        mgr = self._get_driver_from_entry_point(
            entry_point='monasca = ceilosca.publisher.monclient:'
                        'MonascaPublisher',
            namespace='ceilometer.publisher')
        return mgr.driver(url)
    def setUp(self):
        super(TestApi, self).setUp()
        self.PATH_PREFIX = '/v2'
        # Fresh oslo.config fixture; point pipeline/policy/mapping options
        # at the test copies shipped in etc/ceilometer.
        self.CONF = self.useFixture(fixture_config.Config()).conf
        self.CONF([], project='ceilometer', validate_default_values=True)
        self.setup_messaging(self.CONF)
        opts.set_defaults(self.CONF)
        self.CONF.set_override("auth_version", "v2.0",
                               group=OPT_GROUP_NAME)
        self.CONF.set_override("policy_file",
                               self.path_get('etc/ceilometer/policy.json'),
                               group='oslo_policy')
        self.CONF.import_opt('pipeline_cfg_file', 'ceilometer.pipeline')
        self.CONF.set_override(
            'pipeline_cfg_file',
            self.path_get('etc/ceilometer/pipeline.yaml')
        )
        self.CONF.import_opt('monasca_mappings',
                             'ceilosca.publisher.monasca_data_filter',
                             group='monasca')
        self.CONF.set_override(
            'monasca_mappings',
            self.path_get('etc/ceilometer/monasca_field_definitions.yaml'),
            group='monasca'
        )
        # Patch the Monasca HTTP client plus the driver-manager entry
        # points so the Monasca driver is loaded with a mocked transport;
        # the resulting connection is then pinned for the app's lifetime.
        with mock.patch("ceilosca.monasca_client.Client") as mock_client,\
                mock.patch('ceilometer.storage.get_connection') as \
                get_storage_conn, \
                mock.patch('ceilometer.publisher.get_publisher') as get_pub:
            get_storage_conn.side_effect = (
                self.get_connection_with_mock_driver_manager)
            get_pub.side_effect = self.get_publisher_with_mock_driver_manager
            # mock_mon_client lets tests program metrics_list return values
            self.mock_mon_client = mock_client
            self.conn = storage.get_connection('monasca://127.0.0.1:8080',
                                               'ceilometer.metering.storage')
            self.useFixture(mockpatch.Patch(
                'ceilometer.storage.get_connection',
                return_value=self.conn))
            self.app = self._make_app()
    def _make_app(self, enable_acl=False):
        # Minimal pecan app config for the Ceilometer API controllers.
        self.config = {
            'app': {
                'root': 'ceilometer.api.controllers.root.RootController',
                'modules': ['ceilometer.api'],
                'enable_acl': enable_acl,
            },
            'wsme': {
                'debug': True,
            },
        }
        return pecan.testing.load_test_app(self.config)
    def get_json(self, path, expect_errors=False, headers=None,
                 extra_environ=None, q=None, groupby=None, status=None,
                 override_params=None, **params):
        """Sends simulated HTTP GET request to Pecan test app.

        :param path: url path of target service
        :param expect_errors: boolean value whether an error is expected based
                              on request
        :param headers: A dictionary of headers to send along with the request
        :param extra_environ: A dictionary of environ variables to send along
                              with the request
        :param q: list of queries consisting of: field, value, op, and type
                  keys
        :param groupby: list of fields to group by
        :param status: Expected status code of response
        :param override_params: literally encoded query param string
        :param params: content for wsgi.input of request
        """
        q = q or []
        groupby = groupby or []
        full_path = self.PATH_PREFIX + path
        if override_params:
            all_params = override_params
        else:
            # Encode each query triple into the parallel q.field/q.value/
            # q.op/q.type lists that the v2 API expects.
            query_params = {'q.field': [],
                            'q.value': [],
                            'q.op': [],
                            'q.type': [],
                            }
            for query in q:
                for name in ['field', 'op', 'value', 'type']:
                    query_params['q.%s' % name].append(query.get(name, ''))
            all_params = {}
            all_params.update(params)
            if q:
                all_params.update(query_params)
            if groupby:
                all_params.update({'groupby': groupby})
        response = self.app.get(full_path,
                                params=all_params,
                                headers=headers,
                                extra_environ=extra_environ,
                                expect_errors=expect_errors,
                                status=status)
        # On success callers get the decoded JSON body, not the response
        if not expect_errors:
            response = response.json
        return response
class TestListMeters(TestApi):
    """Tests for GET /v2/meters backed by the mocked Monasca metrics client."""

    def setUp(self):
        super(TestListMeters, self).setUp()
        # Two fake Monasca metric definitions for metrics_list to return.
        self.meter_payload = [{'name': 'm1',
                               'dimensions': {
                                   'type': 'gauge',
                                   'unit': 'any',
                                   'resource_id': 'resource-1',
                                   'project_id': 'project-1',
                                   'user_id': 'user-1',
                                   'source': 'source'}},
                              {'name': 'm2',
                               'dimensions': {
                                   'type': 'delta',
                                   'unit': 'any',
                                   'resource_id': 'resource-1',
                                   'project_id': 'project-1',
                                   'user_id': 'user-1',
                                   'source': 'source'}}]

    def test_empty(self):
        """With no metrics in Monasca, the meter list is empty."""
        data = self.get_json('/meters')
        self.assertEqual([], data)

    def test_get_meters(self):
        """Every Monasca metric is surfaced as an API meter."""
        mnl_mock = self.mock_mon_client().metrics_list
        mnl_mock.return_value = self.meter_payload
        data = self.get_json('/meters')
        self.assertEqual(True, mnl_mock.called)
        self.assertEqual(1, mnl_mock.call_count)
        self.assertEqual(2, len(data))
        # BUG FIX: this was a bare generator expression that was never
        # iterated, so the assertIn assertions inside it never executed.
        payload_names = [payload.get('name') for payload in
                         self.meter_payload]
        for meter in data:
            self.assertIn(meter['name'], payload_names)

    def test_get_meters_query_with_project_resource(self):
        """resource_id/project_id filters map to Monasca dimensions."""
        mnl_mock = self.mock_mon_client().metrics_list
        mnl_mock.return_value = self.meter_payload
        self.get_json('/meters',
                      q=[{'field': 'resource_id',
                          'value': 'resource-1'},
                         {'field': 'project_id',
                          'value': 'project-1'}])
        self.assertEqual(True, mnl_mock.called)
        self.assertEqual(1, mnl_mock.call_count)
        self.assertEqual(dict(dimensions=dict(resource_id=u'resource-1',
                                              project_id=u'project-1'),
                              limit=100),
                         mnl_mock.call_args[1])

    def test_get_meters_query_with_user(self):
        """user_id filter maps to a Monasca dimension."""
        mnl_mock = self.mock_mon_client().metrics_list
        mnl_mock.return_value = self.meter_payload
        self.get_json('/meters',
                      q=[{'field': 'user_id',
                          'value': 'user-1'}])
        self.assertEqual(True, mnl_mock.called)
        self.assertEqual(1, mnl_mock.call_count)
        self.assertEqual(dict(dimensions=dict(user_id=u'user-1'),
                              limit=100),
                         mnl_mock.call_args[1])
| 38.570281
| 78
| 0.556851
|
794c81636504cf46f4ef81b3be3229c4e6951504
| 546
|
py
|
Python
|
easy_thumbnails/test_settings.py
|
emschorsch/easy-thumbnails
|
769fddaada30eea9564ab459f120364da5888711
|
[
"BSD-3-Clause"
] | 1
|
2021-11-08T09:45:52.000Z
|
2021-11-08T09:45:52.000Z
|
easy_thumbnails/test_settings.py
|
emschorsch/easy-thumbnails
|
769fddaada30eea9564ab459f120364da5888711
|
[
"BSD-3-Clause"
] | null | null | null |
easy_thumbnails/test_settings.py
|
emschorsch/easy-thumbnails
|
769fddaada30eea9564ab459f120364da5888711
|
[
"BSD-3-Clause"
] | null | null | null |
import os
# Single site; required by django.contrib.sites.
SITE_ID = 1
# Serve test media straight out of the package directory.
MEDIA_ROOT = os.path.normcase(os.path.dirname(os.path.abspath(__file__)))
MEDIA_URL = '/media/'
# Legacy single-setting form, kept for old Django versions.
DATABASE_ENGINE = 'sqlite3'
# In-memory SQLite keeps the test run fast and leaves no files behind.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ':memory:',
    }
}
INSTALLED_APPS = [
    'django.contrib.contenttypes',
    'django.contrib.sites',
    'django.contrib.auth',
    'django.contrib.admin',
    'easy_thumbnails',
    'easy_thumbnails.tests',
]
# This is only needed for the 1.4.X test environment
USE_TZ = True
SECRET_KEY = 'easy'
| 18.2
| 73
| 0.652015
|
794c829df5daea5f3f12925d88730e5f629a5fa8
| 203,703
|
py
|
Python
|
py65/tests/devices/test_mpu6502.py
|
MorrisMA/py65
|
bda1553ff88fc577944bde3d7cb3e75a3b83ccfa
|
[
"BSD-3-Clause"
] | null | null | null |
py65/tests/devices/test_mpu6502.py
|
MorrisMA/py65
|
bda1553ff88fc577944bde3d7cb3e75a3b83ccfa
|
[
"BSD-3-Clause"
] | null | null | null |
py65/tests/devices/test_mpu6502.py
|
MorrisMA/py65
|
bda1553ff88fc577944bde3d7cb3e75a3b83ccfa
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest
import sys
import Py65MAM.py65.assembler
import Py65MAM.py65.devices.mpu6502
class Common6502Tests:
"""Tests common to 6502-based microprocessors"""
# Reset
def test_reset_sets_registers_to_initial_states(self):
    """reset() must leave SP=0xFF, A=X=Y=0, and only BREAK|UNUSED set in P."""
    mpu = self._make_mpu()
    mpu.reset()
    self.assertEqual(0xFF, mpu.sp)
    self.assertEqual(0, mpu.a)
    self.assertEqual(0, mpu.x)
    self.assertEqual(0, mpu.y)
    self.assertEqual(mpu.BREAK | mpu.UNUSED, mpu.p)
# ADC Absolute
# Pattern for this family: write "ADC $C000" (opcode 0x6D) at $0000, seed
# the operand at $C000, step once, then check PC, A and the N/V/Z/C flag
# bits of P. Flag checks use masking: `p & FLAG` is 0 when clear and
# equal to FLAG when set.
def test_adc_bcd_off_absolute_carry_clear_in_accumulator_zeroes(self):
    mpu = self._make_mpu()
    mpu.a = 0
    # $0000 ADC $C000
    self._write(mpu.memory, 0x0000, (0x6D, 0x00, 0xC0))
    # sanity check: the memory image covers the full 64 KiB address space
    self.assertEqual(0x10000, len(mpu.memory))
    mpu.memory[0xC000] = 0x00
    mpu.step()
    self.assertEqual(0x0003, mpu.pc)
    self.assertEqual(0x00, mpu.a)
    self.assertEqual(0, mpu.p & mpu.CARRY)
    self.assertEqual(0, mpu.p & mpu.NEGATIVE)
    self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_adc_bcd_off_absolute_carry_set_in_accumulator_zero(self):
    # carry-in contributes +1: 0 + 0 + C -> 1
    mpu = self._make_mpu()
    mpu.a = 0
    mpu.p |= mpu.CARRY
    # $0000 ADC $C000
    self._write(mpu.memory, 0x0000, (0x6D, 0x00, 0xC0))
    mpu.memory[0xC000] = 0x00
    mpu.step()
    self.assertEqual(0x0003, mpu.pc)
    self.assertEqual(0x01, mpu.a)
    self.assertEqual(0, mpu.p & mpu.NEGATIVE)
    self.assertEqual(0, mpu.p & mpu.ZERO)
    self.assertNotEqual(mpu.CARRY, mpu.p & mpu.CARRY)
def test_adc_bcd_off_absolute_carry_clear_in_no_carry_clear_out(self):
    # 0x01 + 0xFE = 0xFF: negative result, no carry out
    mpu = self._make_mpu()
    mpu.a = 0x01
    # $0000 ADC $C000
    self._write(mpu.memory, 0x0000, (0x6D, 0x00, 0xC0))
    mpu.memory[0xC000] = 0xFE
    mpu.step()
    self.assertEqual(0x0003, mpu.pc)
    self.assertEqual(0xFF, mpu.a)
    self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
    self.assertEqual(0, mpu.p & mpu.CARRY)
    self.assertEqual(0, mpu.p & mpu.ZERO)
def test_adc_bcd_off_absolute_carry_clear_in_carry_set_out(self):
    # 0x02 + 0xFF = 0x101: wraps to 0x01 with carry out
    mpu = self._make_mpu()
    mpu.a = 0x02
    # $0000 ADC $C000
    self._write(mpu.memory, 0x0000, (0x6D, 0x00, 0xC0))
    mpu.memory[0xC000] = 0xFF
    mpu.step()
    self.assertEqual(0x0003, mpu.pc)
    self.assertEqual(0x01, mpu.a)
    self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
    self.assertEqual(0, mpu.p & mpu.NEGATIVE)
    self.assertEqual(0, mpu.p & mpu.ZERO)
def test_adc_bcd_off_absolute_overflow_clr_no_carry_01_plus_01(self):
    mpu = self._make_mpu()
    mpu.p &= ~(mpu.CARRY)
    mpu.a = 0x01
    # $0000 ADC $C000
    self._write(mpu.memory, 0x0000, (0x6D, 0x00, 0xC0))
    mpu.memory[0xC000] = 0x01
    mpu.step()
    self.assertEqual(0x0003, mpu.pc)
    self.assertEqual(0x02, mpu.a)
    self.assertEqual(0, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_absolute_overflow_clr_no_carry_01_plus_ff(self):
    mpu = self._make_mpu()
    mpu.p &= ~(mpu.CARRY)
    mpu.a = 0x01
    # $0000 ADC $C000
    self._write(mpu.memory, 0x0000, (0x6D, 0x00, 0xC0))
    mpu.memory[0xC000] = 0xff
    mpu.step()
    self.assertEqual(0x0003, mpu.pc)
    self.assertEqual(0x00, mpu.a)
    self.assertEqual(0, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_absolute_overflow_set_no_carry_7f_plus_01(self):
    # signed overflow: +127 + +1 crosses into negative territory
    mpu = self._make_mpu()
    mpu.p &= ~(mpu.CARRY)
    mpu.a = 0x7f
    # $0000 ADC $C000
    self._write(mpu.memory, 0x0000, (0x6D, 0x00, 0xC0))
    mpu.memory[0xC000] = 0x01
    mpu.step()
    self.assertEqual(0x0003, mpu.pc)
    self.assertEqual(0x80, mpu.a)
    self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_absolute_overflow_set_no_carry_80_plus_ff(self):
    # signed overflow: -128 + -1 crosses into positive territory
    mpu = self._make_mpu()
    mpu.p &= ~(mpu.CARRY)
    mpu.a = 0x80
    # $0000 ADC $C000
    self._write(mpu.memory, 0x0000, (0x6D, 0x00, 0xC0))
    mpu.memory[0xC000] = 0xff
    mpu.step()
    self.assertEqual(0x0003, mpu.pc)
    self.assertEqual(0x7f, mpu.a)
    self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_absolute_overflow_set_on_40_plus_40(self):
    # +64 + +64 = "+128": sets both N and V
    mpu = self._make_mpu()
    mpu.p &= ~(mpu.OVERFLOW)
    mpu.a = 0x40
    # $0000 ADC $C000
    self._write(mpu.memory, 0x0000, (0x6D, 0x00, 0xC0))
    mpu.memory[0xC000] = 0x40
    mpu.step()
    self.assertEqual(0x0003, mpu.pc)
    self.assertEqual(0x80, mpu.a)
    self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
    self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
    self.assertEqual(0, mpu.p & mpu.ZERO)
# ADC Zero Page
# Same cases as the Absolute family, but via the zero-page addressing
# mode: "ADC $B0" (opcode 0x65), operand at $00B0, 2-byte instruction so
# PC advances to $0002.
def test_adc_bcd_off_zp_carry_clear_in_accumulator_zeroes(self):
    mpu = self._make_mpu()
    mpu.a = 0
    # $0000 ADC $00B0
    self._write(mpu.memory, 0x0000, (0x65, 0xB0))
    mpu.memory[0x00B0] = 0x00
    mpu.step()
    self.assertEqual(0x0002, mpu.pc)
    self.assertEqual(0x00, mpu.a)
    self.assertEqual(0, mpu.p & mpu.CARRY)
    self.assertEqual(0, mpu.p & mpu.NEGATIVE)
    self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_adc_bcd_off_zp_carry_set_in_accumulator_zero(self):
    mpu = self._make_mpu()
    mpu.a = 0
    mpu.p |= mpu.CARRY
    # $0000 ADC $00B0
    self._write(mpu.memory, 0x0000, (0x65, 0xB0))
    mpu.memory[0x00B0] = 0x00
    mpu.step()
    self.assertEqual(0x0002, mpu.pc)
    self.assertEqual(0x01, mpu.a)
    self.assertEqual(0, mpu.p & mpu.NEGATIVE)
    self.assertEqual(0, mpu.p & mpu.ZERO)
    self.assertNotEqual(mpu.CARRY, mpu.p & mpu.CARRY)
def test_adc_bcd_off_zp_carry_clear_in_no_carry_clear_out(self):
    mpu = self._make_mpu()
    mpu.a = 0x01
    # $0000 ADC $00B0
    self._write(mpu.memory, 0x0000, (0x65, 0xB0))
    mpu.memory[0x00B0] = 0xFE
    mpu.step()
    self.assertEqual(0x0002, mpu.pc)
    self.assertEqual(0xFF, mpu.a)
    self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
    self.assertEqual(0, mpu.p & mpu.CARRY)
    self.assertEqual(0, mpu.p & mpu.ZERO)
def test_adc_bcd_off_zp_carry_clear_in_carry_set_out(self):
    mpu = self._make_mpu()
    mpu.a = 0x02
    # $0000 ADC $00B0
    self._write(mpu.memory, 0x0000, (0x65, 0xB0))
    mpu.memory[0x00B0] = 0xFF
    mpu.step()
    self.assertEqual(0x0002, mpu.pc)
    self.assertEqual(0x01, mpu.a)
    self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
    self.assertEqual(0, mpu.p & mpu.NEGATIVE)
    self.assertEqual(0, mpu.p & mpu.ZERO)
def test_adc_bcd_off_zp_overflow_clr_no_carry_01_plus_01(self):
    mpu = self._make_mpu()
    mpu.p &= ~(mpu.CARRY)
    mpu.a = 0x01
    # $0000 ADC $00B0
    self._write(mpu.memory, 0x0000, (0x65, 0xB0))
    mpu.memory[0x00B0] = 0x01
    mpu.step()
    self.assertEqual(0x0002, mpu.pc)
    self.assertEqual(0x02, mpu.a)
    self.assertEqual(0, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_zp_overflow_clr_no_carry_01_plus_ff(self):
    mpu = self._make_mpu()
    mpu.p &= ~(mpu.CARRY)
    mpu.a = 0x01
    # $0000 ADC $00B0
    self._write(mpu.memory, 0x0000, (0x65, 0xB0))
    mpu.memory[0x00B0] = 0xff
    mpu.step()
    self.assertEqual(0x0002, mpu.pc)
    self.assertEqual(0x00, mpu.a)
    self.assertEqual(0, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_zp_overflow_set_no_carry_7f_plus_01(self):
    mpu = self._make_mpu()
    mpu.p &= ~(mpu.CARRY)
    mpu.a = 0x7f
    # $0000 ADC $00B0
    self._write(mpu.memory, 0x0000, (0x65, 0xB0))
    mpu.memory[0x00B0] = 0x01
    mpu.step()
    self.assertEqual(0x0002, mpu.pc)
    self.assertEqual(0x80, mpu.a)
    self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_zp_overflow_set_no_carry_80_plus_ff(self):
    mpu = self._make_mpu()
    mpu.p &= ~(mpu.CARRY)
    mpu.a = 0x80
    # $0000 ADC $00B0
    self._write(mpu.memory, 0x0000, (0x65, 0xB0))
    mpu.memory[0x00B0] = 0xff
    mpu.step()
    self.assertEqual(0x0002, mpu.pc)
    self.assertEqual(0x7f, mpu.a)
    self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_zp_overflow_set_on_40_plus_40(self):
    mpu = self._make_mpu()
    mpu.a = 0x40
    mpu.p &= ~(mpu.OVERFLOW)
    # $0000 ADC $00B0
    self._write(mpu.memory, 0x0000, (0x65, 0xB0))
    mpu.memory[0x00B0] = 0x40
    mpu.step()
    self.assertEqual(0x0002, mpu.pc)
    self.assertEqual(0x80, mpu.a)
    self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
    self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
    self.assertEqual(0, mpu.p & mpu.ZERO)
# ADC Immediate
# "ADC #$nn" (opcode 0x69): the operand is the instruction's second byte,
# so no memory seeding is needed. 2-byte instruction, PC ends at $0002.
# The trailing tests exercise decimal (BCD) mode with the D flag set.
def test_adc_bcd_off_immediate_carry_clear_in_accumulator_zeroes(self):
    mpu = self._make_mpu()
    mpu.a = 0
    # $0000 ADC #$00
    self._write(mpu.memory, 0x0000, (0x69, 0x00))
    mpu.step()
    self.assertEqual(0x0002, mpu.pc)
    self.assertEqual(0x00, mpu.a)
    self.assertEqual(0, mpu.p & mpu.CARRY)
    self.assertEqual(0, mpu.p & mpu.NEGATIVE)
    self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_adc_bcd_off_immediate_carry_set_in_accumulator_zero(self):
    mpu = self._make_mpu()
    mpu.a = 0
    mpu.p |= mpu.CARRY
    # $0000 ADC #$00
    self._write(mpu.memory, 0x0000, (0x69, 0x00))
    mpu.step()
    self.assertEqual(0x0002, mpu.pc)
    self.assertEqual(0x01, mpu.a)
    self.assertEqual(0, mpu.p & mpu.NEGATIVE)
    self.assertEqual(0, mpu.p & mpu.ZERO)
    self.assertNotEqual(mpu.CARRY, mpu.p & mpu.CARRY)
def test_adc_bcd_off_immediate_carry_clear_in_no_carry_clear_out(self):
    mpu = self._make_mpu()
    mpu.a = 0x01
    # $0000 ADC #$FE
    self._write(mpu.memory, 0x0000, (0x69, 0xFE))
    mpu.step()
    self.assertEqual(0x0002, mpu.pc)
    self.assertEqual(0xFF, mpu.a)
    self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
    self.assertEqual(0, mpu.p & mpu.CARRY)
    self.assertEqual(0, mpu.p & mpu.ZERO)
def test_adc_bcd_off_immediate_carry_clear_in_carry_set_out(self):
    mpu = self._make_mpu()
    mpu.a = 0x02
    # $0000 ADC #$FF
    self._write(mpu.memory, 0x0000, (0x69, 0xFF))
    mpu.step()
    self.assertEqual(0x0002, mpu.pc)
    self.assertEqual(0x01, mpu.a)
    self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
    self.assertEqual(0, mpu.p & mpu.NEGATIVE)
    self.assertEqual(0, mpu.p & mpu.ZERO)
def test_adc_bcd_off_immediate_overflow_clr_no_carry_01_plus_01(self):
    mpu = self._make_mpu()
    mpu.p &= ~(mpu.CARRY)
    mpu.a = 0x01
    # $0000 ADC #$01
    self._write(mpu.memory, 0x000, (0x69, 0x01))
    mpu.step()
    self.assertEqual(0x0002, mpu.pc)
    self.assertEqual(0x02, mpu.a)
    self.assertEqual(0, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_immediate_overflow_clr_no_carry_01_plus_ff(self):
    mpu = self._make_mpu()
    mpu.p &= ~(mpu.CARRY)
    mpu.a = 0x01
    # $0000 ADC #$FF
    self._write(mpu.memory, 0x000, (0x69, 0xff))
    mpu.step()
    self.assertEqual(0x0002, mpu.pc)
    self.assertEqual(0x00, mpu.a)
    self.assertEqual(0, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_immediate_overflow_set_no_carry_7f_plus_01(self):
    mpu = self._make_mpu()
    mpu.p &= ~(mpu.CARRY)
    mpu.a = 0x7f
    # $0000 ADC #$01
    self._write(mpu.memory, 0x000, (0x69, 0x01))
    mpu.step()
    self.assertEqual(0x0002, mpu.pc)
    self.assertEqual(0x80, mpu.a)
    self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_immediate_overflow_set_no_carry_80_plus_ff(self):
    mpu = self._make_mpu()
    mpu.p &= ~(mpu.CARRY)
    mpu.a = 0x80
    # $0000 ADC #$FF
    self._write(mpu.memory, 0x000, (0x69, 0xff))
    mpu.step()
    self.assertEqual(0x0002, mpu.pc)
    self.assertEqual(0x7f, mpu.a)
    self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_immediate_overflow_set_on_40_plus_40(self):
    mpu = self._make_mpu()
    mpu.a = 0x40
    # $0000 ADC #$40
    self._write(mpu.memory, 0x0000, (0x69, 0x40))
    mpu.step()
    self.assertEqual(0x0002, mpu.pc)
    self.assertEqual(0x80, mpu.a)
    self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
    self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
    self.assertEqual(0, mpu.p & mpu.ZERO)
def test_adc_bcd_on_immediate_79_plus_00_carry_set(self):
    # decimal mode: 79 + 00 + carry = 80 (BCD), no decimal carry out
    mpu = self._make_mpu()
    mpu.p |= mpu.DECIMAL
    mpu.p |= mpu.CARRY
    mpu.a = 0x79
    # $0000 ADC #$00
    self._write(mpu.memory, 0x0000, (0x69, 0x00))
    mpu.step()
    self.assertEqual(0x0002, mpu.pc)
    self.assertEqual(0x80, mpu.a)
    self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
    self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
    self.assertEqual(0, mpu.p & mpu.ZERO)
    self.assertEqual(0, mpu.p & mpu.CARRY)
def test_adc_bcd_on_immediate_6f_plus_00_carry_set(self):
    # decimal mode: 6F is not valid BCD; 6F + 00 + carry adjusts to 76
    mpu = self._make_mpu()
    mpu.p |= mpu.DECIMAL
    mpu.p |= mpu.CARRY
    mpu.a = 0x6f
    # $0000 ADC #$00
    self._write(mpu.memory, 0x0000, (0x69, 0x00))
    mpu.step()
    self.assertEqual(0x0002, mpu.pc)
    self.assertEqual(0x76, mpu.a)
    self.assertEqual(0, mpu.p & mpu.NEGATIVE)
    self.assertEqual(0, mpu.p & mpu.OVERFLOW)
    self.assertEqual(0, mpu.p & mpu.ZERO)
    self.assertEqual(0, mpu.p & mpu.CARRY)
def test_adc_bcd_on_immediate_9c_plus_9d(self):
    # decimal mode, two back-to-back adds: 9C+9D -> 9F with carry, then
    # 9F+9D+carry -> 93 with carry and overflow
    mpu = self._make_mpu()
    mpu.p |= mpu.DECIMAL
    mpu.p &= ~(mpu.CARRY)
    mpu.a = 0x9c
    # $0000 ADC #$9d
    # $0002 ADC #$9d
    self._write(mpu.memory, 0x0000, (0x69, 0x9d))
    self._write(mpu.memory, 0x0002, (0x69, 0x9d))
    mpu.step()
    self.assertEqual(0x9f, mpu.a)
    self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
    mpu.step()
    self.assertEqual(0x0004, mpu.pc)
    self.assertEqual(0x93, mpu.a)
    self.assertEqual(0, mpu.p & mpu.NEGATIVE)
    self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
    self.assertEqual(0, mpu.p & mpu.ZERO)
    self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# ADC Absolute, X-Indexed
def test_adc_bcd_off_abs_x_carry_clear_in_accumulator_zeroes(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.x = 0x03
# $0000 ADC $C000,X
self._write(mpu.memory, 0x0000, (0x7D, 0x00, 0xC0))
mpu.memory[0xC000 + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_adc_bcd_off_abs_x_carry_set_in_accumulator_zero(self):
mpu = self._make_mpu()
mpu.a = 0
mpu.x = 0x03
mpu.p |= mpu.CARRY
# $0000 ADC $C000,X
self._write(mpu.memory, 0x0000, (0x7D, 0x00, 0xC0))
mpu.memory[0xC000 + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x01, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertNotEqual(mpu.CARRY, mpu.p & mpu.CARRY)
def test_adc_bcd_off_abs_x_carry_clear_in_no_carry_clear_out(self):
mpu = self._make_mpu()
mpu.a = 0x01
mpu.x = 0x03
# $0000 ADC $C000,X
self._write(mpu.memory, 0x0000, (0x7D, 0x00, 0xC0))
mpu.memory[0xC000 + mpu.x] = 0xFE
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xFF, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_adc_bcd_off_abs_x_carry_clear_in_carry_set_out(self):
mpu = self._make_mpu()
mpu.a = 0x02
mpu.x = 0x03
# $0000 ADC $C000,X
self._write(mpu.memory, 0x0000, (0x7D, 0x00, 0xC0))
mpu.memory[0xC000 + mpu.x] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x01, mpu.a)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_adc_bcd_off_abs_x_overflow_clr_no_carry_01_plus_01(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x01
# $0000 ADC $C000,X
self._write(mpu.memory, 0x0000, (0x7D, 0x00, 0xC0))
mpu.memory[0xC000 + mpu.x] = 0x01
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x02, mpu.a)
self.assertEqual(0, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_abs_x_overflow_clr_no_carry_01_plus_ff(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x01
# $0000 ADC $C000,X
self._write(mpu.memory, 0x0000, (0x7D, 0x00, 0xC0))
mpu.memory[0xC000 + mpu.x] = 0xff
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_abs_x_overflow_set_no_carry_7f_plus_01(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x7f
# $0000 ADC $C000,X
self._write(mpu.memory, 0x0000, (0x7D, 0x00, 0xC0))
mpu.memory[0xC000 + mpu.x] = 0x01
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_abs_x_overflow_set_no_carry_80_plus_ff(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x80
# $0000 ADC $C000,X
self._write(mpu.memory, 0x0000, (0x7D, 0x00, 0xC0))
mpu.memory[0xC000 + mpu.x] = 0xff
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x7f, mpu.a)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_abs_x_overflow_set_on_40_plus_40(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.OVERFLOW)
mpu.a = 0x40
mpu.x = 0x03
# $0000 ADC $C000,X
self._write(mpu.memory, 0x0000, (0x7D, 0x00, 0xC0))
mpu.memory[0xC000 + mpu.x] = 0x40
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
self.assertEqual(0, mpu.p & mpu.ZERO)
# ADC Absolute, Y-Indexed
def test_adc_bcd_off_abs_y_carry_clear_in_accumulator_zeroes(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.y = 0x03
# $0000 ADC $C000,Y
self._write(mpu.memory, 0x0000, (0x79, 0x00, 0xC0))
mpu.memory[0xC000 + mpu.y] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_adc_bcd_off_abs_y_carry_set_in_accumulator_zero(self):
mpu = self._make_mpu()
mpu.a = 0
mpu.y = 0x03
mpu.p |= mpu.CARRY
# $0000 ADC $C000,Y
self._write(mpu.memory, 0x0000, (0x79, 0x00, 0xC0))
mpu.memory[0xC000 + mpu.y] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x01, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertNotEqual(mpu.CARRY, mpu.p & mpu.CARRY)
def test_adc_bcd_off_abs_y_carry_clear_in_no_carry_clear_out(self):
mpu = self._make_mpu()
mpu.a = 0x01
mpu.y = 0x03
# $0000 ADC $C000,Y
self._write(mpu.memory, 0x0000, (0x79, 0x00, 0xC0))
mpu.memory[0xC000 + mpu.y] = 0xFE
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xFF, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_adc_bcd_off_abs_y_carry_clear_in_carry_set_out(self):
mpu = self._make_mpu()
mpu.a = 0x02
mpu.y = 0x03
# $0000 ADC $C000,Y
self._write(mpu.memory, 0x0000, (0x79, 0x00, 0xC0))
mpu.memory[0xC000 + mpu.y] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x01, mpu.a)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_adc_bcd_off_abs_y_overflow_clr_no_carry_01_plus_01(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x01
# $0000 ADC $C000,Y
self._write(mpu.memory, 0x0000, (0x79, 0x00, 0xC0))
mpu.memory[0xC000 + mpu.y] = 0x01
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x02, mpu.a)
self.assertEqual(0, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_abs_y_overflow_clr_no_carry_01_plus_ff(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x01
# $0000 ADC $C000,Y
self._write(mpu.memory, 0x0000, (0x79, 0x00, 0xC0))
mpu.memory[0xC000 + mpu.y] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_abs_y_overflow_set_no_carry_7f_plus_01(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x7f
# $0000 ADC $C000,Y
self._write(mpu.memory, 0x0000, (0x79, 0x00, 0xC0))
mpu.memory[0xC000 + mpu.y] = 0x01
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_abs_y_overflow_set_no_carry_80_plus_ff(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x80
# $0000 ADC $C000,Y
self._write(mpu.memory, 0x0000, (0x79, 0x00, 0xC0))
mpu.memory[0xC000 + mpu.y] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x7f, mpu.a)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_abs_y_overflow_set_on_40_plus_40(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.OVERFLOW)
mpu.a = 0x40
mpu.y = 0x03
# $0000 ADC $C000,Y
self._write(mpu.memory, 0x0000, (0x79, 0x00, 0xC0))
mpu.memory[0xC000 + mpu.y] = 0x40
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
self.assertEqual(0, mpu.p & mpu.ZERO)
# ADC Zero Page, X-Indexed
def test_adc_bcd_off_zp_x_carry_clear_in_accumulator_zeroes(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.x = 0x03
# $0000 ADC $0010,X
self._write(mpu.memory, 0x0000, (0x75, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_adc_bcd_off_zp_x_carry_set_in_accumulator_zero(self):
mpu = self._make_mpu()
mpu.a = 0
mpu.x = 0x03
mpu.p |= mpu.CARRY
# $0000 ADC $0010,X
self._write(mpu.memory, 0x0000, (0x75, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x01, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertNotEqual(mpu.CARRY, mpu.p & mpu.CARRY)
def test_adc_bcd_off_zp_x_carry_clear_in_no_carry_clear_out(self):
mpu = self._make_mpu()
mpu.a = 0x01
mpu.x = 0x03
# $0000 ADC $0010,X
self._write(mpu.memory, 0x0000, (0x75, 0x10))
mpu.memory[0x0010 + mpu.x] = 0xFE
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_adc_bcd_off_zp_x_carry_clear_in_carry_set_out(self):
mpu = self._make_mpu()
mpu.a = 0x02
mpu.x = 0x03
# $0000 ADC $0010,X
self._write(mpu.memory, 0x0000, (0x75, 0x10))
mpu.memory[0x0010 + mpu.x] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x01, mpu.a)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_adc_bcd_off_zp_x_overflow_clr_no_carry_01_plus_01(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x01
mpu.x = 0x03
# $0000 ADC $0010,X
self._write(mpu.memory, 0x0000, (0x75, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x01
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x02, mpu.a)
self.assertEqual(0, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_zp_x_overflow_clr_no_carry_01_plus_ff(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x01
mpu.x = 0x03
# $0000 ADC $0010,X
self._write(mpu.memory, 0x0000, (0x75, 0x10))
mpu.memory[0x0010 + mpu.x] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_zp_x_overflow_set_no_carry_7f_plus_01(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x7f
mpu.x = 0x03
# $0000 ADC $0010,X
self._write(mpu.memory, 0x0000, (0x75, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x01
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_zp_x_overflow_set_no_carry_80_plus_ff(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x80
mpu.x = 0x03
# $0000 ADC $0010,X
self._write(mpu.memory, 0x0000, (0x75, 0x10))
mpu.memory[0x0010 + mpu.x] = 0xff
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x7f, mpu.a)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_zp_x_overflow_set_on_40_plus_40(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.OVERFLOW)
mpu.a = 0x40
mpu.x = 0x03
# $0000 ADC $0010,X
self._write(mpu.memory, 0x0000, (0x75, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x40
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
self.assertEqual(0, mpu.p & mpu.ZERO)
# ADC Indirect, Indexed (X)
def test_adc_bcd_off_ind_indexed_carry_clear_in_accumulator_zeroes(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.x = 0x03
# $0000 ADC ($0010,X)
# $0013 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x61, 0x10))
self._write(mpu.memory, 0x0013, (0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_adc_bcd_off_ind_indexed_carry_set_in_accumulator_zero(self):
mpu = self._make_mpu()
mpu.a = 0
mpu.x = 0x03
mpu.p |= mpu.CARRY
# $0000 ADC ($0010,X)
# $0013 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x61, 0x10))
self._write(mpu.memory, 0x0013, (0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x01, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertNotEqual(mpu.CARRY, mpu.p & mpu.CARRY)
def test_adc_bcd_off_ind_indexed_carry_clear_in_no_carry_clear_out(self):
mpu = self._make_mpu()
mpu.a = 0x01
mpu.x = 0x03
# $0000 ADC ($0010,X)
# $0013 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x61, 0x10))
self._write(mpu.memory, 0x0013, (0xCD, 0xAB))
mpu.memory[0xABCD] = 0xFE
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_adc_bcd_off_ind_indexed_carry_clear_in_carry_set_out(self):
mpu = self._make_mpu()
mpu.a = 0x02
mpu.x = 0x03
# $0000 ADC ($0010,X)
# $0013 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x61, 0x10))
self._write(mpu.memory, 0x0013, (0xCD, 0xAB))
mpu.memory[0xABCD] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x01, mpu.a)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_adc_bcd_off_ind_indexed_overflow_clr_no_carry_01_plus_01(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x01
mpu.x = 0x03
# $0000 ADC ($0010,X)
# $0013 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x61, 0x10))
self._write(mpu.memory, 0x0013, (0xCD, 0xAB))
mpu.memory[0xABCD] = 0x01
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x02, mpu.a)
self.assertEqual(0, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_ind_indexed_overflow_clr_no_carry_01_plus_ff(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x01
mpu.x = 0x03
# $0000 ADC ($0010,X)
# $0013 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x61, 0x10))
self._write(mpu.memory, 0x0013, (0xCD, 0xAB))
mpu.memory[0xABCD] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_ind_indexed_overflow_set_no_carry_7f_plus_01(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x7f
mpu.x = 0x03
# $0000 ADC ($0010,X)
# $0013 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x61, 0x10))
self._write(mpu.memory, 0x0013, (0xCD, 0xAB))
mpu.memory[0xABCD] = 0x01
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_ind_indexed_overflow_set_no_carry_80_plus_ff(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x80
mpu.x = 0x03
# $0000 ADC ($0010,X)
# $0013 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x61, 0x10))
self._write(mpu.memory, 0x0013, (0xCD, 0xAB))
mpu.memory[0xABCD] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x7f, mpu.a)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_ind_indexed_overflow_set_on_40_plus_40(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.OVERFLOW)
mpu.a = 0x40
mpu.x = 0x03
# $0000 ADC ($0010,X)
# $0013 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x61, 0x10))
self._write(mpu.memory, 0x0013, (0xCD, 0xAB))
mpu.memory[0xABCD] = 0x40
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
self.assertEqual(0, mpu.p & mpu.ZERO)
# ADC Indexed, Indirect (Y)
def test_adc_bcd_off_indexed_ind_carry_clear_in_accumulator_zeroes(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.y = 0x03
# $0000 ADC ($0010),Y
# $0010 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x71, 0x10))
self._write(mpu.memory, 0x0010, (0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_adc_bcd_off_indexed_ind_carry_set_in_accumulator_zero(self):
mpu = self._make_mpu()
mpu.a = 0
mpu.y = 0x03
mpu.p |= mpu.CARRY
# $0000 ADC ($0010),Y
# $0010 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x71, 0x10))
self._write(mpu.memory, 0x0010, (0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x01, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertNotEqual(mpu.CARRY, mpu.p & mpu.CARRY)
def test_adc_bcd_off_indexed_ind_carry_clear_in_no_carry_clear_out(self):
mpu = self._make_mpu()
mpu.a = 0x01
mpu.y = 0x03
# $0000 ADC ($0010),Y
# $0010 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x71, 0x10))
self._write(mpu.memory, 0x0010, (0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0xFE
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_adc_bcd_off_indexed_ind_carry_clear_in_carry_set_out(self):
mpu = self._make_mpu()
mpu.a = 0x02
mpu.y = 0x03
# $0000 ADC ($0010),Y
# $0010 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x71, 0x10))
self._write(mpu.memory, 0x0010, (0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x01, mpu.a)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
    def test_adc_bcd_off_indexed_ind_overflow_clr_no_carry_01_plus_01(self):
        """ADC ($10),Y: $01 + $01 -> $02 with no signed overflow."""
        mpu = self._make_mpu()
        mpu.p &= ~(mpu.CARRY)
        mpu.a = 0x01
        mpu.y = 0x03
        # $0000 ADC ($0010),Y
        # $0010 Vector to $ABCD
        self._write(mpu.memory, 0x0000, (0x71, 0x10))
        self._write(mpu.memory, 0x0010, (0xCD, 0xAB))
        mpu.memory[0xABCD + mpu.y] = 0x01
        mpu.step()
        self.assertEqual(0x0002, mpu.pc)
        self.assertEqual(0x02, mpu.a)
        self.assertEqual(0, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_indexed_ind_overflow_clr_no_carry_01_plus_ff(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x01
mpu.y = 0x03
# $0000 ADC ($0010),Y
# $0010 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x71, 0x10))
self._write(mpu.memory, 0x0010, (0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_indexed_ind_overflow_set_no_carry_7f_plus_01(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x7f
mpu.y = 0x03
# $0000 ADC ($0010),Y
# $0010 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x71, 0x10))
self._write(mpu.memory, 0x0010, (0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0x01
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
    def test_adc_bcd_off_indexed_ind_overflow_set_no_carry_80_plus_ff(self):
        """ADC ($10),Y: $80 + $FF lands on $7F, so signed overflow sets."""
        mpu = self._make_mpu()
        mpu.p &= ~(mpu.CARRY)
        mpu.a = 0x80
        mpu.y = 0x03
        # $0000 ADC ($0010),Y
        # $0010 Vector to $ABCD
        self._write(mpu.memory, 0x0000, (0x71, 0x10))
        self._write(mpu.memory, 0x0010, (0xCD, 0xAB))
        mpu.memory[0xABCD + mpu.y] = 0xFF
        mpu.step()
        self.assertEqual(0x0002, mpu.pc)
        self.assertEqual(0x7f, mpu.a)
        self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_indexed_ind_overflow_set_on_40_plus_40(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.OVERFLOW)
mpu.a = 0x40
mpu.y = 0x03
# $0000 ADC ($0010),Y
# $0010 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x71, 0x10))
self._write(mpu.memory, 0x0010, (0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0x40
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
self.assertEqual(0, mpu.p & mpu.ZERO)
# AND (Absolute)
def test_and_absolute_all_zeros_setting_zero_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
# $0000 AND $ABCD
self._write(mpu.memory, 0x0000, (0x2D, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_and_absolute_zeros_and_ones_setting_negative_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
# $0000 AND $ABCD
self._write(mpu.memory, 0x0000, (0x2D, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0xAA
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xAA, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
    # AND (Zero Page)
def test_and_zp_all_zeros_setting_zero_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
# $0000 AND $0010
self._write(mpu.memory, 0x0000, (0x25, 0x10))
mpu.memory[0x0010] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_and_zp_zeros_and_ones_setting_negative_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
# $0000 AND $0010
self._write(mpu.memory, 0x0000, (0x25, 0x10))
mpu.memory[0x0010] = 0xAA
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xAA, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# AND (Immediate)
def test_and_immediate_all_zeros_setting_zero_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
# $0000 AND #$00
self._write(mpu.memory, 0x0000, (0x29, 0x00))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_and_immediate_zeros_and_ones_setting_negative_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
# $0000 AND #$AA
self._write(mpu.memory, 0x0000, (0x29, 0xAA))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xAA, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# AND (Absolute, X-Indexed)
def test_and_abs_x_all_zeros_setting_zero_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
mpu.x = 0x03
# $0000 AND $ABCD,X
self._write(mpu.memory, 0x0000, (0x3d, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_and_abs_x_zeros_and_ones_setting_negative_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
mpu.x = 0x03
# $0000 AND $ABCD,X
self._write(mpu.memory, 0x0000, (0x3d, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0xAA
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xAA, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# AND (Absolute, Y-Indexed)
    def test_and_abs_y_all_zeros_setting_zero_flag(self):
        """AND abs,Y with a $00 operand clears A and sets the Z flag."""
        mpu = self._make_mpu()
        mpu.a = 0xFF
        mpu.y = 0x03
        # $0000 AND $ABCD,Y  (opcode $39 is absolute,Y)
        self._write(mpu.memory, 0x0000, (0x39, 0xCD, 0xAB))
        mpu.memory[0xABCD + mpu.y] = 0x00
        mpu.step()
        self.assertEqual(0x0003, mpu.pc)
        self.assertEqual(0x00, mpu.a)
        self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
        self.assertEqual(0, mpu.p & mpu.NEGATIVE)
    def test_and_abs_y_zeros_and_ones_setting_negative_flag(self):
        """AND abs,Y with $AA leaves a high bit set, setting the N flag."""
        mpu = self._make_mpu()
        mpu.a = 0xFF
        mpu.y = 0x03
        # $0000 AND $ABCD,Y  (opcode $39 is absolute,Y)
        self._write(mpu.memory, 0x0000, (0x39, 0xCD, 0xAB))
        mpu.memory[0xABCD + mpu.y] = 0xAA
        mpu.step()
        self.assertEqual(0x0003, mpu.pc)
        self.assertEqual(0xAA, mpu.a)
        self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
        self.assertEqual(0, mpu.p & mpu.ZERO)
# AND Indirect, Indexed (X)
def test_and_ind_indexed_x_all_zeros_setting_zero_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
mpu.x = 0x03
# $0000 AND ($0010,X)
# $0013 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x21, 0x10))
self._write(mpu.memory, 0x0013, (0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_and_ind_indexed_x_zeros_and_ones_setting_negative_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
mpu.x = 0x03
# $0000 AND ($0010,X)
# $0013 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x21, 0x10))
self._write(mpu.memory, 0x0013, (0xCD, 0xAB))
mpu.memory[0xABCD] = 0xAA
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xAA, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# AND Indexed, Indirect (Y)
def test_and_indexed_ind_y_all_zeros_setting_zero_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
mpu.y = 0x03
# $0000 AND ($0010),Y
# $0010 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x31, 0x10))
self._write(mpu.memory, 0x0010, (0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_and_indexed_ind_y_zeros_and_ones_setting_negative_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
mpu.y = 0x03
# $0000 AND ($0010),Y
# $0010 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x31, 0x10))
self._write(mpu.memory, 0x0010, (0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0xAA
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xAA, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# AND Zero Page, X-Indexed
def test_and_zp_x_all_zeros_setting_zero_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
mpu.x = 0x03
# $0000 AND $0010,X
self._write(mpu.memory, 0x0000, (0x35, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_and_zp_x_all_zeros_and_ones_setting_negative_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
mpu.x = 0x03
# $0000 AND $0010,X
self._write(mpu.memory, 0x0000, (0x35, 0x10))
mpu.memory[0x0010 + mpu.x] = 0xAA
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xAA, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# ASL Accumulator
def test_asl_accumulator_sets_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
# $0000 ASL A
mpu.memory[0x0000] = 0x0A
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_asl_accumulator_sets_n_flag(self):
mpu = self._make_mpu()
mpu.a = 0x40
# $0000 ASL A
mpu.memory[0x0000] = 0x0A
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_asl_accumulator_shifts_out_zero(self):
mpu = self._make_mpu()
mpu.a = 0x7F
# $0000 ASL A
mpu.memory[0x0000] = 0x0A
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0xFE, mpu.a)
self.assertEqual(0, mpu.p & mpu.CARRY)
def test_asl_accumulator_shifts_out_one(self):
mpu = self._make_mpu()
mpu.a = 0xFF
# $0000 ASL A
mpu.memory[0x0000] = 0x0A
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0xFE, mpu.a)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
def test_asl_accumulator_80_sets_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0x80
mpu.p &= ~(mpu.ZERO)
# $0000 ASL A
mpu.memory[0x0000] = 0x0A
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
# ASL Absolute
def test_asl_absolute_sets_z_flag(self):
mpu = self._make_mpu()
# $0000 ASL $ABCD
self._write(mpu.memory, 0x0000, (0x0E, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xABCD])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_asl_absolute_sets_n_flag(self):
mpu = self._make_mpu()
# $0000 ASL $ABCD
self._write(mpu.memory, 0x0000, (0x0E, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x40
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x80, mpu.memory[0xABCD])
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_asl_absolute_shifts_out_zero(self):
mpu = self._make_mpu()
mpu.a = 0xAA
# $0000 ASL $ABCD
self._write(mpu.memory, 0x0000, (0x0E, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x7F
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xAA, mpu.a)
self.assertEqual(0xFE, mpu.memory[0xABCD])
self.assertEqual(0, mpu.p & mpu.CARRY)
def test_asl_absolute_shifts_out_one(self):
mpu = self._make_mpu()
mpu.a = 0xAA
# $0000 ASL $ABCD
self._write(mpu.memory, 0x0000, (0x0E, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xAA, mpu.a)
self.assertEqual(0xFE, mpu.memory[0xABCD])
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# ASL Zero Page
def test_asl_zp_sets_z_flag(self):
mpu = self._make_mpu()
# $0000 ASL $0010
self._write(mpu.memory, 0x0000, (0x06, 0x10))
mpu.memory[0x0010] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_asl_zp_sets_n_flag(self):
mpu = self._make_mpu()
# $0000 ASL $0010
self._write(mpu.memory, 0x0000, (0x06, 0x10))
mpu.memory[0x0010] = 0x40
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.memory[0x0010])
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_asl_zp_shifts_out_zero(self):
mpu = self._make_mpu()
mpu.a = 0xAA
# $0000 ASL $0010
self._write(mpu.memory, 0x0000, (0x06, 0x10))
mpu.memory[0x0010] = 0x7F
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xAA, mpu.a)
self.assertEqual(0xFE, mpu.memory[0x0010])
self.assertEqual(0, mpu.p & mpu.CARRY)
def test_asl_zp_shifts_out_one(self):
mpu = self._make_mpu()
mpu.a = 0xAA
# $0000 ASL $0010
self._write(mpu.memory, 0x0000, (0x06, 0x10))
mpu.memory[0x0010] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xAA, mpu.a)
self.assertEqual(0xFE, mpu.memory[0x0010])
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# ASL Absolute, X-Indexed
def test_asl_abs_x_indexed_sets_z_flag(self):
mpu = self._make_mpu()
mpu.x = 0x03
# $0000 ASL $ABCD,X
self._write(mpu.memory, 0x0000, (0x1E, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xABCD + mpu.x])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_asl_abs_x_indexed_sets_n_flag(self):
mpu = self._make_mpu()
mpu.x = 0x03
# $0000 ASL $ABCD,X
self._write(mpu.memory, 0x0000, (0x1E, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x40
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x80, mpu.memory[0xABCD + mpu.x])
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_asl_abs_x_indexed_shifts_out_zero(self):
mpu = self._make_mpu()
mpu.a = 0xAA
mpu.x = 0x03
# $0000 ASL $ABCD,X
self._write(mpu.memory, 0x0000, (0x1E, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x7F
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xAA, mpu.a)
self.assertEqual(0xFE, mpu.memory[0xABCD + mpu.x])
self.assertEqual(0, mpu.p & mpu.CARRY)
def test_asl_abs_x_indexed_shifts_out_one(self):
mpu = self._make_mpu()
mpu.a = 0xAA
mpu.x = 0x03
# $0000 ASL $ABCD,X
self._write(mpu.memory, 0x0000, (0x1E, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xAA, mpu.a)
self.assertEqual(0xFE, mpu.memory[0xABCD + mpu.x])
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# ASL Zero Page, X-Indexed
def test_asl_zp_x_indexed_sets_z_flag(self):
mpu = self._make_mpu()
mpu.x = 0x03
# $0000 ASL $0010,X
self._write(mpu.memory, 0x0000, (0x16, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010 + mpu.x])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_asl_zp_x_indexed_sets_n_flag(self):
mpu = self._make_mpu()
mpu.x = 0x03
# $0000 ASL $0010,X
self._write(mpu.memory, 0x0000, (0x16, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x40
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.memory[0x0010 + mpu.x])
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_asl_zp_x_indexed_shifts_out_zero(self):
mpu = self._make_mpu()
mpu.x = 0x03
mpu.a = 0xAA
# $0000 ASL $0010,X
self._write(mpu.memory, 0x0000, (0x16, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x7F
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xAA, mpu.a)
self.assertEqual(0xFE, mpu.memory[0x0010 + mpu.x])
self.assertEqual(0, mpu.p & mpu.CARRY)
def test_asl_zp_x_indexed_shifts_out_one(self):
mpu = self._make_mpu()
mpu.x = 0x03
mpu.a = 0xAA
# $0000 ASL $0010,X
self._write(mpu.memory, 0x0000, (0x16, 0x10))
mpu.memory[0x0010 + mpu.x] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xAA, mpu.a)
self.assertEqual(0xFE, mpu.memory[0x0010 + mpu.x])
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# BCC
def test_bcc_carry_clear_branches_relative_forward(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
# $0000 BCC +6
self._write(mpu.memory, 0x0000, (0x90, 0x06))
mpu.step()
self.assertEqual(0x0002 + 0x06, mpu.pc)
    def test_bcc_carry_clear_branches_relative_backward(self):
        # Intends to verify BCC with carry clear takes a backward branch.
        mpu = self._make_mpu()
        mpu.p &= ~(mpu.CARRY)
        mpu.pc = 0x0050
        # NOTE(review): Python precedence makes this 0x06 ^ (0xFF + 1) ==
        # 0x106, not the two's-complement 0xFA the comment claims.  The
        # assertion below is self-consistent with 0x106, but confirm this
        # actually exercises a *backward* branch in the emulator.
        rel = (0x06 ^ 0xFF + 1)  # two's complement of 6
        # $0000 BCC -6
        self._write(mpu.memory, 0x0050, (0x90, rel))
        mpu.step()
        self.assertEqual(0x0052 + rel, mpu.pc)
def test_bcc_carry_set_does_not_branch(self):
mpu = self._make_mpu()
mpu.p |= mpu.CARRY
# $0000 BCC +6
self._write(mpu.memory, 0x0000, (0x90, 0x06))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
# BCS
def test_bcs_carry_set_branches_relative_forward(self):
mpu = self._make_mpu()
mpu.p |= mpu.CARRY
# $0000 BCS +6
self._write(mpu.memory, 0x0000, (0xB0, 0x06))
mpu.step()
self.assertEqual(0x0002 + 0x06, mpu.pc)
    def test_bcs_carry_set_branches_relative_backward(self):
        # Intends to verify BCS with carry set takes a backward branch.
        mpu = self._make_mpu()
        mpu.p |= mpu.CARRY
        mpu.pc = 0x0050
        # NOTE(review): Python precedence makes this 0x06 ^ (0xFF + 1) ==
        # 0x106, not the two's-complement 0xFA the comment claims.  The
        # assertion below is self-consistent with 0x106, but confirm this
        # actually exercises a *backward* branch in the emulator.
        rel = (0x06 ^ 0xFF + 1)  # two's complement of 6
        # $0000 BCS -6
        self._write(mpu.memory, 0x0050, (0xB0, rel))
        mpu.step()
        self.assertEqual(0x0052 + rel, mpu.pc)
def test_bcs_carry_clear_does_not_branch(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
# $0000 BCS +6
self._write(mpu.memory, 0x0000, (0xB0, 0x06))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
# BEQ
def test_beq_zero_set_branches_relative_forward(self):
mpu = self._make_mpu()
mpu.p |= mpu.ZERO
# $0000 BEQ +6
self._write(mpu.memory, 0x0000, (0xF0, 0x06))
mpu.step()
self.assertEqual(0x0002 + 0x06, mpu.pc)
    def test_beq_zero_set_branches_relative_backward(self):
        # Intends to verify BEQ with Z set takes a backward branch.
        mpu = self._make_mpu()
        mpu.p |= mpu.ZERO
        mpu.pc = 0x0050
        # NOTE(review): Python precedence makes this 0x06 ^ (0xFF + 1) ==
        # 0x106, not the two's-complement 0xFA the comment claims.  The
        # assertion below is self-consistent with 0x106, but confirm this
        # actually exercises a *backward* branch in the emulator.
        rel = (0x06 ^ 0xFF + 1)  # two's complement of 6
        # $0000 BEQ -6
        self._write(mpu.memory, 0x0050, (0xF0, rel))
        mpu.step()
        self.assertEqual(0x0052 + rel, mpu.pc)
def test_beq_zero_clear_does_not_branch(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.ZERO)
# $0000 BEQ +6
self._write(mpu.memory, 0x0000, (0xF0, 0x06))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
# BIT (Absolute)
def test_bit_abs_copies_bit_7_of_memory_to_n_flag_when_0(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.NEGATIVE)
# $0000 BIT $FEED
self._write(mpu.memory, 0x0000, (0x2C, 0xED, 0xFE))
mpu.memory[0xFEED] = 0xFF
mpu.a = 0xFF
mpu.step()
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
def test_bit_abs_copies_bit_7_of_memory_to_n_flag_when_1(self):
mpu = self._make_mpu()
mpu.p |= mpu.NEGATIVE
# $0000 BIT $FEED
self._write(mpu.memory, 0x0000, (0x2C, 0xED, 0xFE))
mpu.memory[0xFEED] = 0x00
mpu.a = 0xFF
mpu.step()
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_bit_abs_copies_bit_6_of_memory_to_v_flag_when_0(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.OVERFLOW)
# $0000 BIT $FEED
self._write(mpu.memory, 0x0000, (0x2C, 0xED, 0xFE))
mpu.memory[0xFEED] = 0xFF
mpu.a = 0xFF
mpu.step()
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
def test_bit_abs_copies_bit_6_of_memory_to_v_flag_when_1(self):
mpu = self._make_mpu()
mpu.p |= mpu.OVERFLOW
# $0000 BIT $FEED
self._write(mpu.memory, 0x0000, (0x2C, 0xED, 0xFE))
mpu.memory[0xFEED] = 0x00
mpu.a = 0xFF
mpu.step()
self.assertEqual(0, mpu.p & mpu.OVERFLOW)
def test_bit_abs_stores_result_of_and_in_z_preserves_a_when_1(self):
mpu = self._make_mpu()
mpu.p &= ~mpu.ZERO
# $0000 BIT $FEED
self._write(mpu.memory, 0x0000, (0x2C, 0xED, 0xFE))
mpu.memory[0xFEED] = 0x00
mpu.a = 0x01
mpu.step()
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0x01, mpu.a)
self.assertEqual(0x00, mpu.memory[0xFEED])
def test_bit_abs_stores_result_of_and_when_nonzero_in_z_preserves_a(self):
mpu = self._make_mpu()
mpu.p |= mpu.ZERO
# $0000 BIT $FEED
self._write(mpu.memory, 0x0000, (0x2C, 0xED, 0xFE))
mpu.memory[0xFEED] = 0x01
mpu.a = 0x01
mpu.step()
self.assertEqual(0, mpu.p & mpu.ZERO) # result of AND is non-zero
self.assertEqual(0x01, mpu.a)
self.assertEqual(0x01, mpu.memory[0xFEED])
def test_bit_abs_stores_result_of_and_when_zero_in_z_preserves_a(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.ZERO)
# $0000 BIT $FEED
self._write(mpu.memory, 0x0000, (0x2C, 0xED, 0xFE))
mpu.memory[0xFEED] = 0x00
mpu.a = 0x01
mpu.step()
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO) # result of AND is zero
self.assertEqual(0x01, mpu.a)
self.assertEqual(0x00, mpu.memory[0xFEED])
# BIT (Zero Page)
def test_bit_zp_copies_bit_7_of_memory_to_n_flag_when_0(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.NEGATIVE)
# $0000 BIT $0010
self._write(mpu.memory, 0x0000, (0x24, 0x10))
mpu.memory[0x0010] = 0xFF
mpu.a = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(3, mpu.processorCycles)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
def test_bit_zp_copies_bit_7_of_memory_to_n_flag_when_1(self):
mpu = self._make_mpu()
mpu.p |= mpu.NEGATIVE
# $0000 BIT $0010
self._write(mpu.memory, 0x0000, (0x24, 0x10))
mpu.memory[0x0010] = 0x00
mpu.a = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(3, mpu.processorCycles)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_bit_zp_copies_bit_6_of_memory_to_v_flag_when_0(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.OVERFLOW)
# $0000 BIT $0010
self._write(mpu.memory, 0x0000, (0x24, 0x10))
mpu.memory[0x0010] = 0xFF
mpu.a = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(3, mpu.processorCycles)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
def test_bit_zp_copies_bit_6_of_memory_to_v_flag_when_1(self):
mpu = self._make_mpu()
mpu.p |= mpu.OVERFLOW
# $0000 BIT $0010
self._write(mpu.memory, 0x0000, (0x24, 0x10))
mpu.memory[0x0010] = 0x00
mpu.a = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(3, mpu.processorCycles)
self.assertEqual(0, mpu.p & mpu.OVERFLOW)
def test_bit_zp_stores_result_of_and_in_z_preserves_a_when_1(self):
mpu = self._make_mpu()
mpu.p &= ~mpu.ZERO
# $0000 BIT $0010
self._write(mpu.memory, 0x0000, (0x24, 0x10))
mpu.memory[0x0010] = 0x00
mpu.a = 0x01
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(3, mpu.processorCycles)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0x01, mpu.a)
self.assertEqual(0x00, mpu.memory[0x0010])
def test_bit_zp_stores_result_of_and_when_nonzero_in_z_preserves_a(self):
mpu = self._make_mpu()
mpu.p |= mpu.ZERO
# $0000 BIT $0010
self._write(mpu.memory, 0x0000, (0x24, 0x10))
mpu.memory[0x0010] = 0x01
mpu.a = 0x01
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(3, mpu.processorCycles)
self.assertEqual(0, mpu.p & mpu.ZERO) # result of AND is non-zero
self.assertEqual(0x01, mpu.a)
self.assertEqual(0x01, mpu.memory[0x0010])
def test_bit_zp_stores_result_of_and_when_zero_in_z_preserves_a(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.ZERO)
# $0000 BIT $0010
self._write(mpu.memory, 0x0000, (0x24, 0x10))
mpu.memory[0x0010] = 0x00
mpu.a = 0x01
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(3, mpu.processorCycles)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO) # result of AND is zero
self.assertEqual(0x01, mpu.a)
self.assertEqual(0x00, mpu.memory[0x0010])
# BMI
def test_bmi_negative_set_branches_relative_forward(self):
mpu = self._make_mpu()
mpu.p |= mpu.NEGATIVE
# $0000 BMI +06
self._write(mpu.memory, 0x0000, (0x30, 0x06))
mpu.step()
self.assertEqual(0x0002 + 0x06, mpu.pc)
    def test_bmi_negative_set_branches_relative_backward(self):
        # Intends to verify BMI with N set takes a backward branch.
        mpu = self._make_mpu()
        mpu.p |= mpu.NEGATIVE
        mpu.pc = 0x0050
        # $0000 BMI -6
        # NOTE(review): Python precedence makes this 0x06 ^ (0xFF + 1) ==
        # 0x106, not the two's-complement 0xFA the comment claims.  The
        # assertion below is self-consistent with 0x106, but confirm this
        # actually exercises a *backward* branch in the emulator.
        rel = (0x06 ^ 0xFF + 1)  # two's complement of 6
        self._write(mpu.memory, 0x0050, (0x30, rel))
        mpu.step()
        self.assertEqual(0x0052 + rel, mpu.pc)
def test_bmi_negative_clear_does_not_branch(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.NEGATIVE)
# $0000 BEQ +6
self._write(mpu.memory, 0x0000, (0x30, 0x06))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
# BNE
def test_bne_zero_clear_branches_relative_forward(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.ZERO)
# $0000 BNE +6
self._write(mpu.memory, 0x0000, (0xD0, 0x06))
mpu.step()
self.assertEqual(0x0002 + 0x06, mpu.pc)
    def test_bne_zero_clear_branches_relative_backward(self):
        # Intends to verify BNE with Z clear takes a backward branch.
        mpu = self._make_mpu()
        mpu.p &= ~(mpu.ZERO)
        mpu.pc = 0x0050
        # $0050 BNE -6
        # NOTE(review): Python precedence makes this 0x06 ^ (0xFF + 1) ==
        # 0x106, not the two's-complement 0xFA the comment claims.  The
        # assertion below is self-consistent with 0x106, but confirm this
        # actually exercises a *backward* branch in the emulator.
        rel = (0x06 ^ 0xFF + 1)  # two's complement of 6
        self._write(mpu.memory, 0x0050, (0xD0, rel))
        mpu.step()
        self.assertEqual(0x0052 + rel, mpu.pc)
def test_bne_zero_set_does_not_branch(self):
mpu = self._make_mpu()
mpu.p |= mpu.ZERO
# $0000 BNE +6
self._write(mpu.memory, 0x0000, (0xD0, 0x06))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
# BPL
def test_bpl_negative_clear_branches_relative_forward(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.NEGATIVE)
# $0000 BPL +06
self._write(mpu.memory, 0x0000, (0x10, 0x06))
mpu.step()
self.assertEqual(0x0002 + 0x06, mpu.pc)
    def test_bpl_negative_clear_branches_relative_backward(self):
        # Intends to verify BPL with N clear takes a backward branch.
        mpu = self._make_mpu()
        mpu.p &= ~(mpu.NEGATIVE)
        mpu.pc = 0x0050
        # $0050 BPL -6
        # NOTE(review): Python precedence makes this 0x06 ^ (0xFF + 1) ==
        # 0x106, not the two's-complement 0xFA the comment claims.  The
        # assertion below is self-consistent with 0x106, but confirm this
        # actually exercises a *backward* branch in the emulator.
        rel = (0x06 ^ 0xFF + 1)  # two's complement of 6
        self._write(mpu.memory, 0x0050, (0x10, rel))
        mpu.step()
        self.assertEqual(0x0052 + rel, mpu.pc)
def test_bpl_negative_set_does_not_branch(self):
mpu = self._make_mpu()
mpu.p |= mpu.NEGATIVE
# $0000 BPL +6
self._write(mpu.memory, 0x0000, (0x10, 0x06))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
# BRK
    def test_brk_pushes_pc_plus_2_and_status_then_sets_pc_to_irq_vector(self):
        """BRK pushes PCH then PCL (of PC+2) then status with B set onto
        the stack, loads PC from the vector at $FFFE/$FFFF, and sets I."""
        mpu = self._make_mpu()
        mpu.p = mpu.UNUSED
        # IRQ/BRK vector at $FFFE/$FFFF points to $ABCD (little-endian).
        self._write(mpu.memory, 0xFFFE, (0xCD, 0xAB))
        # $C000 BRK
        mpu.memory[0xC000] = 0x00
        mpu.pc = 0xC000
        mpu.step()
        self.assertEqual(0xABCD, mpu.pc)
        # Stack frame: three bytes pushed from the top of page one.
        self.assertEqual(0xC0, mpu.memory[0x1FF])  # PCH
        self.assertEqual(0x02, mpu.memory[0x1FE])  # PCL
        self.assertEqual(mpu.BREAK | mpu.UNUSED, mpu.memory[0x1FD])  # Status
        self.assertEqual(0xFC, mpu.sp)
        # BRK additionally sets the interrupt-disable flag.
        self.assertEqual(mpu.BREAK | mpu.UNUSED | mpu.INTERRUPT, mpu.p)
# BVC
def test_bvc_overflow_clear_branches_relative_forward(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.OVERFLOW)
# $0000 BVC +6
self._write(mpu.memory, 0x0000, (0x50, 0x06))
mpu.step()
self.assertEqual(0x0002 + 0x06, mpu.pc)
    def test_bvc_overflow_clear_branches_relative_backward(self):
        # Intends to verify BVC with V clear takes a backward branch.
        mpu = self._make_mpu()
        mpu.p &= ~(mpu.OVERFLOW)
        mpu.pc = 0x0050
        # NOTE(review): Python precedence makes this 0x06 ^ (0xFF + 1) ==
        # 0x106, not the two's-complement 0xFA the comment claims.  The
        # assertion below is self-consistent with 0x106, but confirm this
        # actually exercises a *backward* branch in the emulator.
        rel = (0x06 ^ 0xFF + 1)  # two's complement of 6
        # $0050 BVC -6
        self._write(mpu.memory, 0x0050, (0x50, rel))
        mpu.step()
        self.assertEqual(0x0052 + rel, mpu.pc)
def test_bvc_overflow_set_does_not_branch(self):
mpu = self._make_mpu()
mpu.p |= mpu.OVERFLOW
# $0000 BVC +6
self._write(mpu.memory, 0x0000, (0x50, 0x06))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
# BVS
def test_bvs_overflow_set_branches_relative_forward(self):
mpu = self._make_mpu()
mpu.p |= mpu.OVERFLOW
# $0000 BVS +6
self._write(mpu.memory, 0x0000, (0x70, 0x06))
mpu.step()
self.assertEqual(0x0002 + 0x06, mpu.pc)
    def test_bvs_overflow_set_branches_relative_backward(self):
        # Intends to verify BVS with V set takes a backward branch.
        mpu = self._make_mpu()
        mpu.p |= mpu.OVERFLOW
        mpu.pc = 0x0050
        # NOTE(review): Python precedence makes this 0x06 ^ (0xFF + 1) ==
        # 0x106, not the two's-complement 0xFA the comment claims.  The
        # assertion below is self-consistent with 0x106, but confirm this
        # actually exercises a *backward* branch in the emulator.
        rel = (0x06 ^ 0xFF + 1)  # two's complement of 6
        # $0050 BVS -6
        self._write(mpu.memory, 0x0050, (0x70, rel))
        mpu.step()
        self.assertEqual(0x0052 + rel, mpu.pc)
def test_bvs_overflow_clear_does_not_branch(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.OVERFLOW)
# $0000 BVS +6
self._write(mpu.memory, 0x0000, (0x70, 0x06))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
# CLC
def test_clc_clears_carry_flag(self):
mpu = self._make_mpu()
mpu.p |= mpu.CARRY
# $0000 CLC
mpu.memory[0x0000] = 0x18
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0, mpu.p & mpu.CARRY)
# CLD
def test_cld_clears_decimal_flag(self):
mpu = self._make_mpu()
mpu.p |= mpu.DECIMAL
# $0000 CLD
mpu.memory[0x0000] = 0xD8
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0, mpu.p & mpu.DECIMAL)
# CLI
def test_cli_clears_interrupt_mask_flag(self):
mpu = self._make_mpu()
mpu.p |= mpu.INTERRUPT
# $0000 CLI
mpu.memory[0x0000] = 0x58
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0, mpu.p & mpu.INTERRUPT)
# CLV
def test_clv_clears_overflow_flag(self):
mpu = self._make_mpu()
mpu.p |= mpu.OVERFLOW
# $0000 CLV
mpu.memory[0x0000] = 0xB8
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0, mpu.p & mpu.OVERFLOW)
# DEC Absolute
def test_dec_abs_decrements_memory(self):
mpu = self._make_mpu()
# $0000 DEC 0xABCD
self._write(mpu.memory, 0x0000, (0xCE, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x10
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x0F, mpu.memory[0xABCD])
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_dec_abs_below_00_rolls_over_and_sets_negative_flag(self):
mpu = self._make_mpu()
# $0000 DEC 0xABCD
self._write(mpu.memory, 0x0000, (0xCE, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xFF, mpu.memory[0xABCD])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
def test_dec_abs_sets_zero_flag_when_decrementing_to_zero(self):
mpu = self._make_mpu()
# $0000 DEC 0xABCD
self._write(mpu.memory, 0x0000, (0xCE, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x01
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xABCD])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# DEC Zero Page
def test_dec_zp_decrements_memory(self):
mpu = self._make_mpu()
# $0000 DEC 0x0010
self._write(mpu.memory, 0x0000, (0xC6, 0x10))
mpu.memory[0x0010] = 0x10
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x0F, mpu.memory[0x0010])
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_dec_zp_below_00_rolls_over_and_sets_negative_flag(self):
mpu = self._make_mpu()
# $0000 DEC 0x0010
self._write(mpu.memory, 0x0000, (0xC6, 0x10))
mpu.memory[0x0010] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.memory[0x0010])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
def test_dec_zp_sets_zero_flag_when_decrementing_to_zero(self):
mpu = self._make_mpu()
# $0000 DEC 0x0010
self._write(mpu.memory, 0x0000, (0xC6, 0x10))
mpu.memory[0x0010] = 0x01
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# DEC Absolute, X-Indexed
def test_dec_abs_x_decrements_memory(self):
mpu = self._make_mpu()
# $0000 DEC 0xABCD,X
self._write(mpu.memory, 0x0000, (0xDE, 0xCD, 0xAB))
mpu.x = 0x03
mpu.memory[0xABCD + mpu.x] = 0x10
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x0F, mpu.memory[0xABCD + mpu.x])
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_dec_abs_x_below_00_rolls_over_and_sets_negative_flag(self):
mpu = self._make_mpu()
# $0000 DEC 0xABCD,X
self._write(mpu.memory, 0x0000, (0xDE, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xFF, mpu.memory[0xABCD + mpu.x])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
def test_dec_abs_x_sets_zero_flag_when_decrementing_to_zero(self):
mpu = self._make_mpu()
# $0000 DEC 0xABCD,X
self._write(mpu.memory, 0x0000, (0xDE, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x01
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xABCD + mpu.x])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# DEC Zero Page, X-Indexed
def test_dec_zp_x_decrements_memory(self):
mpu = self._make_mpu()
# $0000 DEC 0x0010,X
self._write(mpu.memory, 0x0000, (0xD6, 0x10))
mpu.x = 0x03
mpu.memory[0x0010 + mpu.x] = 0x10
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x0F, mpu.memory[0x0010 + mpu.x])
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_dec_zp_x_below_00_rolls_over_and_sets_negative_flag(self):
mpu = self._make_mpu()
# $0000 DEC 0x0010,X
self._write(mpu.memory, 0x0000, (0xD6, 0x10))
mpu.x = 0x03
mpu.memory[0x0010 + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.memory[0x0010 + mpu.x])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
def test_dec_zp_x_sets_zero_flag_when_decrementing_to_zero(self):
mpu = self._make_mpu()
# $0000 DEC 0x0010,X
self._write(mpu.memory, 0x0000, (0xD6, 0x10))
mpu.x = 0x03
mpu.memory[0x0010 + mpu.x] = 0x01
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010 + mpu.x])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# DEX
def test_dex_decrements_x(self):
mpu = self._make_mpu()
mpu.x = 0x10
# $0000 DEX
mpu.memory[0x0000] = 0xCA
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x0F, mpu.x)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_dex_below_00_rolls_over_and_sets_negative_flag(self):
mpu = self._make_mpu()
mpu.x = 0x00
# $0000 DEX
mpu.memory[0x0000] = 0xCA
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0xFF, mpu.x)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_dex_sets_zero_flag_when_decrementing_to_zero(self):
mpu = self._make_mpu()
mpu.x = 0x01
# $0000 DEX
mpu.memory[0x0000] = 0xCA
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x00, mpu.x)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# DEY
def test_dey_decrements_y(self):
mpu = self._make_mpu()
mpu.y = 0x10
# $0000 DEY
mpu.memory[0x0000] = 0x88
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x0F, mpu.y)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_dey_below_00_rolls_over_and_sets_negative_flag(self):
mpu = self._make_mpu()
mpu.y = 0x00
# $0000 DEY
mpu.memory[0x0000] = 0x88
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0xFF, mpu.y)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
def test_dey_sets_zero_flag_when_decrementing_to_zero(self):
mpu = self._make_mpu()
mpu.y = 0x01
# $0000 DEY
mpu.memory[0x0000] = 0x88
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x00, mpu.y)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
# EOR Absolute
def test_eor_absolute_flips_bits_over_setting_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
self._write(mpu.memory, 0x0000, (0x4D, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0xFF, mpu.memory[0xABCD])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_eor_absolute_flips_bits_over_setting_n_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
self._write(mpu.memory, 0x0000, (0x4D, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xFF, mpu.a)
self.assertEqual(0xFF, mpu.memory[0xABCD])
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# EOR Zero Page
def test_eor_zp_flips_bits_over_setting_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
self._write(mpu.memory, 0x0000, (0x45, 0x10))
mpu.memory[0x0010] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0xFF, mpu.memory[0x0010])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_eor_zp_flips_bits_over_setting_n_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
self._write(mpu.memory, 0x0000, (0x45, 0x10))
mpu.memory[0x0010] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.a)
self.assertEqual(0xFF, mpu.memory[0x0010])
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# EOR Immediate
def test_eor_immediate_flips_bits_over_setting_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
self._write(mpu.memory, 0x0000, (0x49, 0xFF))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_eor_immediate_flips_bits_over_setting_n_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
self._write(mpu.memory, 0x0000, (0x49, 0xFF))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# EOR Absolute, X-Indexed
def test_eor_abs_x_indexed_flips_bits_over_setting_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
mpu.x = 0x03
self._write(mpu.memory, 0x0000, (0x5D, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0xFF, mpu.memory[0xABCD + mpu.x])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_eor_abs_x_indexed_flips_bits_over_setting_n_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.x = 0x03
self._write(mpu.memory, 0x0000, (0x5D, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xFF, mpu.a)
self.assertEqual(0xFF, mpu.memory[0xABCD + mpu.x])
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# EOR Absolute, Y-Indexed
def test_eor_abs_y_indexed_flips_bits_over_setting_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
mpu.y = 0x03
self._write(mpu.memory, 0x0000, (0x59, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0xFF, mpu.memory[0xABCD + mpu.y])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_eor_abs_y_indexed_flips_bits_over_setting_n_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.y = 0x03
self._write(mpu.memory, 0x0000, (0x59, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xFF, mpu.a)
self.assertEqual(0xFF, mpu.memory[0xABCD + mpu.y])
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# EOR Indirect, Indexed (X)
def test_eor_ind_indexed_x_flips_bits_over_setting_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
mpu.x = 0x03
self._write(mpu.memory, 0x0000, (0x41, 0x10)) # => EOR ($0010,X)
self._write(mpu.memory, 0x0013, (0xCD, 0xAB)) # => Vector to $ABCD
mpu.memory[0xABCD] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0xFF, mpu.memory[0xABCD])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_eor_ind_indexed_x_flips_bits_over_setting_n_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.x = 0x03
self._write(mpu.memory, 0x0000, (0x41, 0x10)) # => EOR ($0010,X)
self._write(mpu.memory, 0x0013, (0xCD, 0xAB)) # => Vector to $ABCD
mpu.memory[0xABCD] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.a)
self.assertEqual(0xFF, mpu.memory[0xABCD])
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# EOR Indexed, Indirect (Y)
def test_eor_indexed_ind_y_flips_bits_over_setting_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
mpu.y = 0x03
self._write(mpu.memory, 0x0000, (0x51, 0x10)) # => EOR ($0010),Y
self._write(mpu.memory, 0x0010, (0xCD, 0xAB)) # => Vector to $ABCD
mpu.memory[0xABCD + mpu.y] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0xFF, mpu.memory[0xABCD + mpu.y])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_eor_indexed_ind_y_flips_bits_over_setting_n_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.y = 0x03
self._write(mpu.memory, 0x0000, (0x51, 0x10)) # => EOR ($0010),Y
self._write(mpu.memory, 0x0010, (0xCD, 0xAB)) # => Vector to $ABCD
mpu.memory[0xABCD + mpu.y] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.a)
self.assertEqual(0xFF, mpu.memory[0xABCD + mpu.y])
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# EOR Zero Page, X-Indexed
def test_eor_zp_x_indexed_flips_bits_over_setting_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
mpu.x = 0x03
self._write(mpu.memory, 0x0000, (0x55, 0x10))
mpu.memory[0x0010 + mpu.x] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0xFF, mpu.memory[0x0010 + mpu.x])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_eor_zp_x_indexed_flips_bits_over_setting_n_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.x = 0x03
self._write(mpu.memory, 0x0000, (0x55, 0x10))
mpu.memory[0x0010 + mpu.x] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.a)
self.assertEqual(0xFF, mpu.memory[0x0010 + mpu.x])
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# INC Absolute
def test_inc_abs_increments_memory(self):
mpu = self._make_mpu()
self._write(mpu.memory, 0x0000, (0xEE, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x09
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x0A, mpu.memory[0xABCD])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_inc_abs_increments_memory_rolls_over_and_sets_zero_flag(self):
mpu = self._make_mpu()
self._write(mpu.memory, 0x0000, (0xEE, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xABCD])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_inc_abs_sets_negative_flag_when_incrementing_above_7F(self):
mpu = self._make_mpu()
self._write(mpu.memory, 0x0000, (0xEE, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x7F
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x80, mpu.memory[0xABCD])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
# INC Zero Page
def test_inc_zp_increments_memory(self):
mpu = self._make_mpu()
self._write(mpu.memory, 0x0000, (0xE6, 0x10))
mpu.memory[0x0010] = 0x09
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x0A, mpu.memory[0x0010])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_inc_zp_increments_memory_rolls_over_and_sets_zero_flag(self):
mpu = self._make_mpu()
self._write(mpu.memory, 0x0000, (0xE6, 0x10))
mpu.memory[0x0010] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_inc_zp_sets_negative_flag_when_incrementing_above_7F(self):
mpu = self._make_mpu()
self._write(mpu.memory, 0x0000, (0xE6, 0x10))
mpu.memory[0x0010] = 0x7F
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.memory[0x0010])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
# INC Absolute, X-Indexed
def test_inc_abs_x_increments_memory(self):
mpu = self._make_mpu()
self._write(mpu.memory, 0x0000, (0xFE, 0xCD, 0xAB))
mpu.x = 0x03
mpu.memory[0xABCD + mpu.x] = 0x09
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x0A, mpu.memory[0xABCD + mpu.x])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_inc_abs_x_increments_memory_rolls_over_and_sets_zero_flag(self):
mpu = self._make_mpu()
self._write(mpu.memory, 0x0000, (0xFE, 0xCD, 0xAB))
mpu.x = 0x03
mpu.memory[0xABCD + mpu.x] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xABCD + mpu.x])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_inc_abs_x_sets_negative_flag_when_incrementing_above_7F(self):
mpu = self._make_mpu()
self._write(mpu.memory, 0x0000, (0xFE, 0xCD, 0xAB))
mpu.x = 0x03
mpu.memory[0xABCD + mpu.x] = 0x7F
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x80, mpu.memory[0xABCD + mpu.x])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
# INC Zero Page, X-Indexed
def test_inc_zp_x_increments_memory(self):
mpu = self._make_mpu()
self._write(mpu.memory, 0x0000, (0xF6, 0x10))
mpu.x = 0x03
mpu.memory[0x0010 + mpu.x] = 0x09
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x0A, mpu.memory[0x0010 + mpu.x])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_inc_zp_x_increments_memory_rolls_over_and_sets_zero_flag(self):
mpu = self._make_mpu()
self._write(mpu.memory, 0x0000, (0xF6, 0x10))
mpu.memory[0x0010 + mpu.x] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010 + mpu.x])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_inc_zp_x_sets_negative_flag_when_incrementing_above_7F(self):
mpu = self._make_mpu()
self._write(mpu.memory, 0x0000, (0xF6, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x7F
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.memory[0x0010 + mpu.x])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
# INX
def test_inx_increments_x(self):
mpu = self._make_mpu()
mpu.x = 0x09
mpu.memory[0x0000] = 0xE8 # => INX
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x0A, mpu.x)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_inx_above_FF_rolls_over_and_sets_zero_flag(self):
mpu = self._make_mpu()
mpu.x = 0xFF
mpu.memory[0x0000] = 0xE8 # => INX
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x00, mpu.x)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_inx_sets_negative_flag_when_incrementing_above_7F(self):
mpu = self._make_mpu()
mpu.x = 0x7f
mpu.memory[0x0000] = 0xE8 # => INX
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x80, mpu.x)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
# INY
def test_iny_increments_y(self):
mpu = self._make_mpu()
mpu.y = 0x09
mpu.memory[0x0000] = 0xC8 # => INY
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x0A, mpu.y)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_iny_above_FF_rolls_over_and_sets_zero_flag(self):
mpu = self._make_mpu()
mpu.y = 0xFF
mpu.memory[0x0000] = 0xC8 # => INY
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x00, mpu.y)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_iny_sets_negative_flag_when_incrementing_above_7F(self):
mpu = self._make_mpu()
mpu.y = 0x7f
mpu.memory[0x0000] = 0xC8 # => INY
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x80, mpu.y)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
# JMP Absolute
def test_jmp_abs_jumps_to_absolute_address(self):
mpu = self._make_mpu()
# $0000 JMP $ABCD
self._write(mpu.memory, 0x0000, (0x4C, 0xCD, 0xAB))
mpu.step()
self.assertEqual(0xABCD, mpu.pc)
# JMP Indirect
def test_jmp_ind_jumps_to_indirect_address(self):
mpu = self._make_mpu()
# $0000 JMP ($ABCD)
self._write(mpu.memory, 0x0000, (0x6C, 0x00, 0x02))
self._write(mpu.memory, 0x0200, (0xCD, 0xAB))
mpu.step()
self.assertEqual(0xABCD, mpu.pc)
# JSR
def test_jsr_pushes_pc_plus_2_and_sets_pc(self):
mpu = self._make_mpu()
# $C000 JSR $FFD2
self._write(mpu.memory, 0xC000, (0x20, 0xD2, 0xFF))
mpu.pc = 0xC000
mpu.step()
self.assertEqual(0xFFD2, mpu.pc)
self.assertEqual(0xFD, mpu.sp)
self.assertEqual(0xC0, mpu.memory[0x01FF]) # PCH
self.assertEqual(0x02, mpu.memory[0x01FE]) # PCL+2
# LDA Absolute
def test_lda_absolute_loads_a_sets_n_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
# $0000 LDA $ABCD
self._write(mpu.memory, 0x0000, (0xAD, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x80
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_lda_absolute_loads_a_sets_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
# $0000 LDA $ABCD
self._write(mpu.memory, 0x0000, (0xAD, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LDA Zero Page
def test_lda_zp_loads_a_sets_n_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
# $0000 LDA $0010
self._write(mpu.memory, 0x0000, (0xA5, 0x10))
mpu.memory[0x0010] = 0x80
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_lda_zp_loads_a_sets_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
# $0000 LDA $0010
self._write(mpu.memory, 0x0000, (0xA5, 0x10))
mpu.memory[0x0010] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LDA Immediate
def test_lda_immediate_loads_a_sets_n_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
# $0000 LDA #$80
self._write(mpu.memory, 0x0000, (0xA9, 0x80))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_lda_immediate_loads_a_sets_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
# $0000 LDA #$00
self._write(mpu.memory, 0x0000, (0xA9, 0x00))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LDA Absolute, X-Indexed
def test_lda_abs_x_indexed_loads_a_sets_n_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.x = 0x03
# $0000 LDA $ABCD,X
self._write(mpu.memory, 0x0000, (0xBD, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x80
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_lda_abs_x_indexed_loads_a_sets_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
mpu.x = 0x03
# $0000 LDA $ABCD,X
self._write(mpu.memory, 0x0000, (0xBD, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_lda_abs_x_indexed_does_not_page_wrap(self):
mpu = self._make_mpu()
mpu.a = 0
mpu.x = 0xFF
# $0000 LDA $0080,X
self._write(mpu.memory, 0x0000, (0xBD, 0x80, 0x00))
mpu.memory[0x0080 + mpu.x] = 0x42
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x42, mpu.a)
# LDA Absolute, Y-Indexed
def test_lda_abs_y_indexed_loads_a_sets_n_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.y = 0x03
# $0000 LDA $ABCD,Y
self._write(mpu.memory, 0x0000, (0xB9, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0x80
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_lda_abs_y_indexed_loads_a_sets_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
mpu.y = 0x03
# $0000 LDA $ABCD,Y
self._write(mpu.memory, 0x0000, (0xB9, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
    def test_lda_abs_y_indexed_does_not_page_wrap(self):
        # With Y=$FF the effective address $0080+$FF crosses a page
        # boundary; the load must come from $017F, not wrap to $007F.
        mpu = self._make_mpu()
        mpu.a = 0
        mpu.y = 0xFF
        # $0000 LDA $0080,Y  (opcode $B9 is LDA abs,Y)
        self._write(mpu.memory, 0x0000, (0xB9, 0x80, 0x00))
        mpu.memory[0x0080 + mpu.y] = 0x42
        mpu.step()
        self.assertEqual(0x0003, mpu.pc)
        self.assertEqual(0x42, mpu.a)
# LDA Indirect, Indexed (X)
def test_lda_ind_indexed_x_loads_a_sets_n_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.x = 0x03
# $0000 LDA ($0010,X)
# $0013 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0xA1, 0x10))
self._write(mpu.memory, 0x0013, (0xCD, 0xAB))
mpu.memory[0xABCD] = 0x80
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_lda_ind_indexed_x_loads_a_sets_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.x = 0x03
# $0000 LDA ($0010,X)
# $0013 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0xA1, 0x10))
self._write(mpu.memory, 0x0013, (0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LDA Indexed, Indirect (Y)
def test_lda_indexed_ind_y_loads_a_sets_n_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.y = 0x03
# $0000 LDA ($0010),Y
# $0010 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0xB1, 0x10))
self._write(mpu.memory, 0x0010, (0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0x80
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_lda_indexed_ind_y_loads_a_sets_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.y = 0x03
# $0000 LDA ($0010),Y
# $0010 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0xB1, 0x10))
self._write(mpu.memory, 0x0010, (0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LDA Zero Page, X-Indexed
def test_lda_zp_x_indexed_loads_a_sets_n_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.x = 0x03
# $0000 LDA $10,X
self._write(mpu.memory, 0x0000, (0xB5, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x80
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_lda_zp_x_indexed_loads_a_sets_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
mpu.x = 0x03
# $0000 LDA $10,X
self._write(mpu.memory, 0x0000, (0xB5, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LDX Absolute
def test_ldx_absolute_loads_x_sets_n_flag(self):
mpu = self._make_mpu()
mpu.x = 0x00
# $0000 LDX $ABCD
self._write(mpu.memory, 0x0000, (0xAE, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x80
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x80, mpu.x)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_ldx_absolute_loads_x_sets_z_flag(self):
mpu = self._make_mpu()
mpu.x = 0xFF
# $0000 LDX $ABCD
self._write(mpu.memory, 0x0000, (0xAE, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.x)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LDX Zero Page
def test_ldx_zp_loads_x_sets_n_flag(self):
mpu = self._make_mpu()
mpu.x = 0x00
# $0000 LDX $0010
self._write(mpu.memory, 0x0000, (0xA6, 0x10))
mpu.memory[0x0010] = 0x80
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.x)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_ldx_zp_loads_x_sets_z_flag(self):
mpu = self._make_mpu()
mpu.x = 0xFF
# $0000 LDX $0010
self._write(mpu.memory, 0x0000, (0xA6, 0x10))
mpu.memory[0x0010] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.x)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LDX Immediate
def test_ldx_immediate_loads_x_sets_n_flag(self):
mpu = self._make_mpu()
mpu.x = 0x00
# $0000 LDX #$80
self._write(mpu.memory, 0x0000, (0xA2, 0x80))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.x)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_ldx_immediate_loads_x_sets_z_flag(self):
mpu = self._make_mpu()
mpu.x = 0xFF
# $0000 LDX #$00
self._write(mpu.memory, 0x0000, (0xA2, 0x00))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.x)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LDX Absolute, Y-Indexed
def test_ldx_abs_y_indexed_loads_x_sets_n_flag(self):
mpu = self._make_mpu()
mpu.x = 0x00
mpu.y = 0x03
# $0000 LDX $ABCD,Y
self._write(mpu.memory, 0x0000, (0xBE, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0x80
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x80, mpu.x)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_ldx_abs_y_indexed_loads_x_sets_z_flag(self):
mpu = self._make_mpu()
mpu.x = 0xFF
mpu.y = 0x03
# $0000 LDX $ABCD,Y
self._write(mpu.memory, 0x0000, (0xBE, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.x)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LDX Zero Page, Y-Indexed
def test_ldx_zp_y_indexed_loads_x_sets_n_flag(self):
mpu = self._make_mpu()
mpu.x = 0x00
mpu.y = 0x03
# $0000 LDX $0010,Y
self._write(mpu.memory, 0x0000, (0xB6, 0x10))
mpu.memory[0x0010 + mpu.y] = 0x80
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.x)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_ldx_zp_y_indexed_loads_x_sets_z_flag(self):
mpu = self._make_mpu()
mpu.x = 0xFF
mpu.y = 0x03
# $0000 LDX $0010,Y
self._write(mpu.memory, 0x0000, (0xB6, 0x10))
mpu.memory[0x0010 + mpu.y] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.x)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LDY Absolute
def test_ldy_absolute_loads_y_sets_n_flag(self):
mpu = self._make_mpu()
mpu.y = 0x00
# $0000 LDY $ABCD
self._write(mpu.memory, 0x0000, (0xAC, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x80
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x80, mpu.y)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_ldy_absolute_loads_y_sets_z_flag(self):
mpu = self._make_mpu()
mpu.y = 0xFF
# $0000 LDY $ABCD
self._write(mpu.memory, 0x0000, (0xAC, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.y)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LDY Zero Page
def test_ldy_zp_loads_y_sets_n_flag(self):
mpu = self._make_mpu()
mpu.y = 0x00
# $0000 LDY $0010
self._write(mpu.memory, 0x0000, (0xA4, 0x10))
mpu.memory[0x0010] = 0x80
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.y)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_ldy_zp_loads_y_sets_z_flag(self):
mpu = self._make_mpu()
mpu.y = 0xFF
# $0000 LDY $0010
self._write(mpu.memory, 0x0000, (0xA4, 0x10))
mpu.memory[0x0010] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.y)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LDY Immediate
def test_ldy_immediate_loads_y_sets_n_flag(self):
mpu = self._make_mpu()
mpu.y = 0x00
# $0000 LDY #$80
self._write(mpu.memory, 0x0000, (0xA0, 0x80))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.y)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_ldy_immediate_loads_y_sets_z_flag(self):
mpu = self._make_mpu()
mpu.y = 0xFF
# $0000 LDY #$00
self._write(mpu.memory, 0x0000, (0xA0, 0x00))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.y)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LDY Absolute, X-Indexed
def test_ldy_abs_x_indexed_loads_x_sets_n_flag(self):
mpu = self._make_mpu()
mpu.y = 0x00
mpu.x = 0x03
# $0000 LDY $ABCD,X
self._write(mpu.memory, 0x0000, (0xBC, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x80
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x80, mpu.y)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_ldy_abs_x_indexed_loads_x_sets_z_flag(self):
mpu = self._make_mpu()
mpu.y = 0xFF
mpu.x = 0x03
# $0000 LDY $ABCD,X
self._write(mpu.memory, 0x0000, (0xBC, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.y)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LDY Zero Page, X-Indexed
def test_ldy_zp_x_indexed_loads_x_sets_n_flag(self):
mpu = self._make_mpu()
mpu.y = 0x00
mpu.x = 0x03
# $0000 LDY $0010,X
self._write(mpu.memory, 0x0000, (0xB4, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x80
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.y)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_ldy_zp_x_indexed_loads_x_sets_z_flag(self):
mpu = self._make_mpu()
mpu.y = 0xFF
mpu.x = 0x03
# $0000 LDY $0010,X
self._write(mpu.memory, 0x0000, (0xB4, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.y)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LSR Accumulator
def test_lsr_accumulator_rotates_in_zero_not_carry(self):
mpu = self._make_mpu()
mpu.p |= mpu.CARRY
# $0000 LSR A
mpu.memory[0x0000] = (0x4A)
mpu.a = 0x00
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_lsr_accumulator_sets_carry_and_zero_flags_after_rotation(self):
mpu = self._make_mpu()
mpu.p &= ~mpu.CARRY
# $0000 LSR A
mpu.memory[0x0000] = (0x4A)
mpu.a = 0x01
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_lsr_accumulator_rotates_bits_right(self):
mpu = self._make_mpu()
mpu.p |= mpu.CARRY
# $0000 LSR A
mpu.memory[0x0000] = (0x4A)
mpu.a = 0x04
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x02, mpu.a)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LSR Absolute
def test_lsr_absolute_rotates_in_zero_not_carry(self):
mpu = self._make_mpu()
mpu.p |= mpu.CARRY
# $0000 LSR $ABCD
self._write(mpu.memory, 0x0000, (0x4E, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xABCD])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_lsr_absolute_sets_carry_and_zero_flags_after_rotation(self):
mpu = self._make_mpu()
mpu.p &= ~mpu.CARRY
# $0000 LSR $ABCD
self._write(mpu.memory, 0x0000, (0x4E, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x01
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xABCD])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_lsr_absolute_rotates_bits_right(self):
mpu = self._make_mpu()
mpu.p |= mpu.CARRY
# $0000 LSR $ABCD
self._write(mpu.memory, 0x0000, (0x4E, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x04
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x02, mpu.memory[0xABCD])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LSR Zero Page
def test_lsr_zp_rotates_in_zero_not_carry(self):
mpu = self._make_mpu()
mpu.p |= mpu.CARRY
# $0000 LSR $0010
self._write(mpu.memory, 0x0000, (0x46, 0x10))
mpu.memory[0x0010] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_lsr_zp_sets_carry_and_zero_flags_after_rotation(self):
mpu = self._make_mpu()
mpu.p &= ~mpu.CARRY
# $0000 LSR $0010
self._write(mpu.memory, 0x0000, (0x46, 0x10))
mpu.memory[0x0010] = 0x01
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_lsr_zp_rotates_bits_right(self):
mpu = self._make_mpu()
mpu.p |= mpu.CARRY
# $0000 LSR $0010
self._write(mpu.memory, 0x0000, (0x46, 0x10))
mpu.memory[0x0010] = 0x04
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x02, mpu.memory[0x0010])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LSR Absolute, X-Indexed
def test_lsr_abs_x_indexed_rotates_in_zero_not_carry(self):
mpu = self._make_mpu()
mpu.p |= mpu.CARRY
mpu.x = 0x03
# $0000 LSR $ABCD,X
self._write(mpu.memory, 0x0000, (0x5E, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xABCD + mpu.x])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_lsr_abs_x_indexed_sets_c_and_z_flags_after_rotation(self):
mpu = self._make_mpu()
mpu.p &= ~mpu.CARRY
mpu.x = 0x03
# $0000 LSR $ABCD,X
self._write(mpu.memory, 0x0000, (0x5E, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x01
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xABCD + mpu.x])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_lsr_abs_x_indexed_rotates_bits_right(self):
mpu = self._make_mpu()
mpu.p |= mpu.CARRY
# $0000 LSR $ABCD,X
self._write(mpu.memory, 0x0000, (0x5E, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x04
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x02, mpu.memory[0xABCD + mpu.x])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LSR Zero Page, X-Indexed
def test_lsr_zp_x_indexed_rotates_in_zero_not_carry(self):
mpu = self._make_mpu()
mpu.p |= mpu.CARRY
mpu.x = 0x03
# $0000 LSR $0010,X
self._write(mpu.memory, 0x0000, (0x56, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010 + mpu.x])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_lsr_zp_x_indexed_sets_carry_and_zero_flags_after_rotation(self):
mpu = self._make_mpu()
mpu.p &= ~mpu.CARRY
mpu.x = 0x03
# $0000 LSR $0010,X
self._write(mpu.memory, 0x0000, (0x56, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x01
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010 + mpu.x])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_lsr_zp_x_indexed_rotates_bits_right(self):
mpu = self._make_mpu()
mpu.p |= mpu.CARRY
mpu.x = 0x03
# $0000 LSR $0010,X
self._write(mpu.memory, 0x0000, (0x56, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x04
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x02, mpu.memory[0x0010 + mpu.x])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# NOP
    def test_nop_does_nothing(self):
        # NOP only advances PC by one; no register or flag changes are
        # asserted here beyond the PC increment.
        mpu = self._make_mpu()
        # $0000 NOP
        mpu.memory[0x0000] = 0xEA
        mpu.step()
        self.assertEqual(0x0001, mpu.pc)
# ORA Absolute
def test_ora_absolute_zeroes_or_zeros_sets_z_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.ZERO)
mpu.a = 0x00
# $0000 ORA $ABCD
self._write(mpu.memory, 0x0000, (0x0D, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_ora_absolute_turns_bits_on_sets_n_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.NEGATIVE)
mpu.a = 0x03
# $0000 ORA $ABCD
self._write(mpu.memory, 0x0000, (0x0D, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x82
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x83, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# ORA Zero Page
def test_ora_zp_zeroes_or_zeros_sets_z_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.ZERO)
mpu.a = 0x00
# $0000 ORA $0010
self._write(mpu.memory, 0x0000, (0x05, 0x10))
mpu.memory[0x0010] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_ora_zp_turns_bits_on_sets_n_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.NEGATIVE)
mpu.a = 0x03
# $0000 ORA $0010
self._write(mpu.memory, 0x0000, (0x05, 0x10))
mpu.memory[0x0010] = 0x82
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x83, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# ORA Immediate
def test_ora_immediate_zeroes_or_zeros_sets_z_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.ZERO)
mpu.a = 0x00
# $0000 ORA #$00
self._write(mpu.memory, 0x0000, (0x09, 0x00))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_ora_immediate_turns_bits_on_sets_n_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.NEGATIVE)
mpu.a = 0x03
# $0000 ORA #$82
self._write(mpu.memory, 0x0000, (0x09, 0x82))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x83, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# ORA Absolute, X
def test_ora_abs_x_indexed_zeroes_or_zeros_sets_z_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.ZERO)
mpu.a = 0x00
mpu.x = 0x03
# $0000 ORA $ABCD,X
self._write(mpu.memory, 0x0000, (0x1D, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_ora_abs_x_indexed_turns_bits_on_sets_n_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.NEGATIVE)
mpu.a = 0x03
mpu.x = 0x03
# $0000 ORA $ABCD,X
self._write(mpu.memory, 0x0000, (0x1D, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x82
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x83, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# ORA Absolute, Y
def test_ora_abs_y_indexed_zeroes_or_zeros_sets_z_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.ZERO)
mpu.a = 0x00
mpu.y = 0x03
# $0000 ORA $ABCD,Y
self._write(mpu.memory, 0x0000, (0x19, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_ora_abs_y_indexed_turns_bits_on_sets_n_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.NEGATIVE)
mpu.a = 0x03
mpu.y = 0x03
# $0000 ORA $ABCD,Y
self._write(mpu.memory, 0x0000, (0x19, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0x82
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x83, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# ORA Indirect, Indexed (X)
def test_ora_ind_indexed_x_zeroes_or_zeros_sets_z_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.ZERO)
mpu.a = 0x00
mpu.x = 0x03
# $0000 ORA ($0010,X)
# $0013 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x01, 0x10))
self._write(mpu.memory, 0x0013, (0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_ora_ind_indexed_x_turns_bits_on_sets_n_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.NEGATIVE)
mpu.a = 0x03
mpu.x = 0x03
# $0000 ORA ($0010,X)
# $0013 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x01, 0x10))
self._write(mpu.memory, 0x0013, (0xCD, 0xAB))
mpu.memory[0xABCD] = 0x82
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x83, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# ORA Indexed, Indirect (Y)
def test_ora_indexed_ind_y_zeroes_or_zeros_sets_z_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.ZERO)
mpu.a = 0x00
mpu.y = 0x03
# $0000 ORA ($0010),Y
# $0010 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x11, 0x10))
self._write(mpu.memory, 0x0010, (0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_ora_indexed_ind_y_turns_bits_on_sets_n_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.NEGATIVE)
mpu.a = 0x03
mpu.y = 0x03
# $0000 ORA ($0010),Y
# $0010 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x11, 0x10))
self._write(mpu.memory, 0x0010, (0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0x82
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x83, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# ORA Zero Page, X
def test_ora_zp_x_indexed_zeroes_or_zeros_sets_z_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.ZERO)
mpu.a = 0x00
mpu.x = 0x03
# $0000 ORA $0010,X
self._write(mpu.memory, 0x0000, (0x15, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_ora_zp_x_indexed_turns_bits_on_sets_n_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.NEGATIVE)
mpu.a = 0x03
mpu.x = 0x03
# $0000 ORA $0010,X
self._write(mpu.memory, 0x0000, (0x15, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x82
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x83, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# PHA
def test_pha_pushes_a_and_updates_sp(self):
mpu = self._make_mpu()
mpu.a = 0xAB
# $0000 PHA
mpu.memory[0x0000] = 0x48
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0xAB, mpu.a)
self.assertEqual(0xAB, mpu.memory[0x01FF])
self.assertEqual(0xFE, mpu.sp)
# PHP
    def test_php_pushes_processor_status_and_updates_sp(self):
        # Exhaustively sweep all 256 flag patterns. P is seeded with
        # BREAK and UNUSED already on, and the byte pushed to $01FF must
        # match it exactly, with SP decremented by one.
        for flags in range(0x100):
            mpu = self._make_mpu()
            mpu.p = flags | mpu.BREAK | mpu.UNUSED
            # $0000 PHP
            mpu.memory[0x0000] = 0x08
            mpu.step()
            self.assertEqual(0x0001, mpu.pc)
            self.assertEqual((flags | mpu.BREAK | mpu.UNUSED),
                             mpu.memory[0x1FF])
            self.assertEqual(0xFE, mpu.sp)
# PLA
    def test_pla_pulls_top_byte_from_stack_into_a_and_updates_sp(self):
        # With SP at $FE the next pull reads $01FF; A must receive that
        # byte and SP must increment back to $FF.
        mpu = self._make_mpu()
        # $0000 PLA
        mpu.memory[0x0000] = 0x68
        mpu.memory[0x01FF] = 0xAB
        mpu.sp = 0xFE
        mpu.step()
        self.assertEqual(0x0001, mpu.pc)
        self.assertEqual(0xAB, mpu.a)
        self.assertEqual(0xFF, mpu.sp)
# PLP
    def test_plp_pulls_top_byte_from_stack_into_flags_and_updates_sp(self):
        # Pulls the byte at $01FF straight into P and bumps SP to $FF.
        # $BA carries BREAK ($10) and UNUSED ($20) set, so the equality
        # check holds even if the implementation forces those bits on.
        mpu = self._make_mpu()
        # $0000 PLP
        mpu.memory[0x0000] = 0x28
        mpu.memory[0x01FF] = 0xBA  # must have BREAK and UNUSED set
        mpu.sp = 0xFE
        mpu.step()
        self.assertEqual(0x0001, mpu.pc)
        self.assertEqual(0xBA, mpu.p)
        self.assertEqual(0xFF, mpu.sp)
# ROL Accumulator
def test_rol_accumulator_zero_and_carry_zero_sets_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.p &= ~(mpu.CARRY)
# $0000 ROL A
mpu.memory[0x0000] = 0x2A
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_rol_accumulator_80_and_carry_zero_sets_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0x80
mpu.p &= ~(mpu.CARRY)
mpu.p &= ~(mpu.ZERO)
# $0000 ROL A
mpu.memory[0x0000] = 0x2A
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_rol_accumulator_zero_and_carry_one_clears_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.p |= mpu.CARRY
# $0000 ROL A
mpu.memory[0x0000] = 0x2A
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x01, mpu.a)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_rol_accumulator_sets_n_flag(self):
mpu = self._make_mpu()
mpu.a = 0x40
mpu.p |= mpu.CARRY
# $0000 ROL A
mpu.memory[0x0000] = 0x2A
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x81, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_rol_accumulator_shifts_out_zero(self):
mpu = self._make_mpu()
mpu.a = 0x7F
mpu.p &= ~(mpu.CARRY)
# $0000 ROL A
mpu.memory[0x0000] = 0x2A
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0xFE, mpu.a)
self.assertEqual(0, mpu.p & mpu.CARRY)
def test_rol_accumulator_shifts_out_one(self):
mpu = self._make_mpu()
mpu.a = 0xFF
mpu.p &= ~(mpu.CARRY)
# $0000 ROL A
mpu.memory[0x0000] = 0x2A
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0xFE, mpu.a)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# ROL Absolute
def test_rol_absolute_zero_and_carry_zero_sets_z_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
# $0000 ROL $ABCD
self._write(mpu.memory, 0x0000, (0x2E, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xABCD])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_rol_absolute_80_and_carry_zero_sets_z_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.p &= ~(mpu.ZERO)
# $0000 ROL $ABCD
self._write(mpu.memory, 0x0000, (0x2E, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x80
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xABCD])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_rol_absolute_zero_and_carry_one_clears_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.p |= mpu.CARRY
# $0000 ROL $ABCD
self._write(mpu.memory, 0x0000, (0x2E, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x01, mpu.memory[0xABCD])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_rol_absolute_sets_n_flag(self):
mpu = self._make_mpu()
mpu.p |= mpu.CARRY
# $0000 ROL $ABCD
self._write(mpu.memory, 0x0000, (0x2E, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x40
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x81, mpu.memory[0xABCD])
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_rol_absolute_shifts_out_zero(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
# $0000 ROL $ABCD
self._write(mpu.memory, 0x0000, (0x2E, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x7F
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xFE, mpu.memory[0xABCD])
self.assertEqual(0, mpu.p & mpu.CARRY)
def test_rol_absolute_shifts_out_one(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
# $0000 ROL $ABCD
self._write(mpu.memory, 0x0000, (0x2E, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xFE, mpu.memory[0xABCD])
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# ROL Zero Page
def test_rol_zp_zero_and_carry_zero_sets_z_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
# $0000 ROL $0010
self._write(mpu.memory, 0x0000, (0x26, 0x10))
mpu.memory[0x0010] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
    def test_rol_zp_80_and_carry_zero_sets_z_flag(self):
        """ROL $10: 0x80 with C=0 rotates to 0x00, setting Z, clearing N."""
        mpu = self._make_mpu()
        mpu.p &= ~(mpu.CARRY)
        mpu.p &= ~(mpu.ZERO)
        # $0000 ROL $0010
        self._write(mpu.memory, 0x0000, (0x26, 0x10))
        mpu.memory[0x0010] = 0x80
        mpu.step()
        self.assertEqual(0x0002, mpu.pc)
        self.assertEqual(0x00, mpu.memory[0x0010])
        self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
        self.assertEqual(0, mpu.p & mpu.NEGATIVE)
    def test_rol_zp_zero_and_carry_one_clears_z_flag(self):
        """ROL $10: 0x00 with C=1 rotates carry in, giving 0x01; Z and N clear."""
        mpu = self._make_mpu()
        mpu.a = 0x00
        mpu.p |= mpu.CARRY
        # $0000 ROL $0010
        self._write(mpu.memory, 0x0000, (0x26, 0x10))
        mpu.memory[0x0010] = 0x00
        mpu.step()
        self.assertEqual(0x0002, mpu.pc)
        self.assertEqual(0x01, mpu.memory[0x0010])
        self.assertEqual(0, mpu.p & mpu.ZERO)
        self.assertEqual(0, mpu.p & mpu.NEGATIVE)
    def test_rol_zp_sets_n_flag(self):
        """ROL $10: 0x40 with C=1 becomes 0x81, setting N and clearing Z."""
        mpu = self._make_mpu()
        mpu.p |= mpu.CARRY
        # $0000 ROL $0010
        self._write(mpu.memory, 0x0000, (0x26, 0x10))
        mpu.memory[0x0010] = 0x40
        mpu.step()
        self.assertEqual(0x0002, mpu.pc)
        self.assertEqual(0x81, mpu.memory[0x0010])
        self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
        self.assertEqual(0, mpu.p & mpu.ZERO)
    def test_rol_zp_shifts_out_zero(self):
        """ROL $10: bit 7 of 0x7F is 0, so carry stays clear after the rotate."""
        mpu = self._make_mpu()
        mpu.p &= ~(mpu.CARRY)
        # $0000 ROL $0010
        self._write(mpu.memory, 0x0000, (0x26, 0x10))
        mpu.memory[0x0010] = 0x7F
        mpu.step()
        self.assertEqual(0x0002, mpu.pc)
        self.assertEqual(0xFE, mpu.memory[0x0010])
        self.assertEqual(0, mpu.p & mpu.CARRY)
    def test_rol_zp_shifts_out_one(self):
        """ROL $10: bit 7 of 0xFF shifts out into carry, setting C."""
        mpu = self._make_mpu()
        mpu.p &= ~(mpu.CARRY)
        # $0000 ROL $0010
        self._write(mpu.memory, 0x0000, (0x26, 0x10))
        mpu.memory[0x0010] = 0xFF
        mpu.step()
        self.assertEqual(0x0002, mpu.pc)
        self.assertEqual(0xFE, mpu.memory[0x0010])
        self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# ROL Absolute, X-Indexed
    def test_rol_abs_x_indexed_zero_and_carry_zero_sets_z_flag(self):
        """ROL $ABCD,X: 0x00 with C=0 stays 0x00, setting Z, clearing N."""
        mpu = self._make_mpu()
        mpu.p &= ~(mpu.CARRY)
        mpu.x = 0x03
        # $0000 ROL $ABCD,X
        self._write(mpu.memory, 0x0000, (0x3E, 0xCD, 0xAB))
        mpu.memory[0xABCD + mpu.x] = 0x00
        mpu.step()
        self.assertEqual(0x0003, mpu.pc)
        self.assertEqual(0x00, mpu.memory[0xABCD + mpu.x])
        self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
        self.assertEqual(0, mpu.p & mpu.NEGATIVE)
    def test_rol_abs_x_indexed_80_and_carry_zero_sets_z_flag(self):
        """ROL $ABCD,X: 0x80 with C=0 rotates to 0x00, setting Z, clearing N."""
        mpu = self._make_mpu()
        mpu.p &= ~(mpu.CARRY)
        mpu.p &= ~(mpu.ZERO)
        mpu.x = 0x03
        # $0000 ROL $ABCD,X
        self._write(mpu.memory, 0x0000, (0x3E, 0xCD, 0xAB))
        mpu.memory[0xABCD + mpu.x] = 0x80
        mpu.step()
        self.assertEqual(0x0003, mpu.pc)
        self.assertEqual(0x00, mpu.memory[0xABCD + mpu.x])
        self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
        self.assertEqual(0, mpu.p & mpu.NEGATIVE)
    def test_rol_abs_x_indexed_zero_and_carry_one_clears_z_flag(self):
        """ROL $ABCD,X: 0x00 with C=1 rotates carry in, giving 0x01; Z and N clear."""
        mpu = self._make_mpu()
        mpu.x = 0x03
        mpu.p |= mpu.CARRY
        # $0000 ROL $ABCD,X
        self._write(mpu.memory, 0x0000, (0x3E, 0xCD, 0xAB))
        mpu.memory[0xABCD + mpu.x] = 0x00
        mpu.step()
        self.assertEqual(0x0003, mpu.pc)
        self.assertEqual(0x01, mpu.memory[0xABCD + mpu.x])
        self.assertEqual(0, mpu.p & mpu.ZERO)
        self.assertEqual(0, mpu.p & mpu.NEGATIVE)
    def test_rol_abs_x_indexed_sets_n_flag(self):
        """ROL $ABCD,X: 0x40 with C=1 becomes 0x81, setting N and clearing Z."""
        mpu = self._make_mpu()
        mpu.x = 0x03
        mpu.p |= mpu.CARRY
        # $0000 ROL $ABCD,X
        self._write(mpu.memory, 0x0000, (0x3E, 0xCD, 0xAB))
        mpu.memory[0xABCD + mpu.x] = 0x40
        mpu.step()
        self.assertEqual(0x0003, mpu.pc)
        self.assertEqual(0x81, mpu.memory[0xABCD + mpu.x])
        self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
        self.assertEqual(0, mpu.p & mpu.ZERO)
    def test_rol_abs_x_indexed_shifts_out_zero(self):
        """ROL $ABCD,X: bit 7 of 0x7F is 0, so carry stays clear after the rotate."""
        mpu = self._make_mpu()
        mpu.x = 0x03
        mpu.p &= ~(mpu.CARRY)
        # $0000 ROL $ABCD,X
        self._write(mpu.memory, 0x0000, (0x3E, 0xCD, 0xAB))
        mpu.memory[0xABCD + mpu.x] = 0x7F
        mpu.step()
        self.assertEqual(0x0003, mpu.pc)
        self.assertEqual(0xFE, mpu.memory[0xABCD + mpu.x])
        self.assertEqual(0, mpu.p & mpu.CARRY)
    def test_rol_abs_x_indexed_shifts_out_one(self):
        """ROL $ABCD,X: bit 7 of 0xFF shifts out into carry, setting C."""
        mpu = self._make_mpu()
        mpu.x = 0x03
        mpu.p &= ~(mpu.CARRY)
        # $0000 ROL $ABCD,X
        self._write(mpu.memory, 0x0000, (0x3E, 0xCD, 0xAB))
        mpu.memory[0xABCD + mpu.x] = 0xFF
        mpu.step()
        self.assertEqual(0x0003, mpu.pc)
        self.assertEqual(0xFE, mpu.memory[0xABCD + mpu.x])
        self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# ROL Zero Page, X-Indexed
    def test_rol_zp_x_indexed_zero_and_carry_zero_sets_z_flag(self):
        """ROL $10,X: 0x00 with C=0 stays 0x00, setting Z, clearing N."""
        mpu = self._make_mpu()
        mpu.p &= ~(mpu.CARRY)
        mpu.x = 0x03
        self._write(mpu.memory, 0x0000, (0x36, 0x10))
        # $0000 ROL $0010,X
        mpu.memory[0x0010 + mpu.x] = 0x00
        mpu.step()
        self.assertEqual(0x0002, mpu.pc)
        self.assertEqual(0x00, mpu.memory[0x0010 + mpu.x])
        self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
        self.assertEqual(0, mpu.p & mpu.NEGATIVE)
    def test_rol_zp_x_indexed_80_and_carry_zero_sets_z_flag(self):
        """ROL $10,X: 0x80 with C=0 rotates to 0x00, setting Z, clearing N."""
        mpu = self._make_mpu()
        mpu.p &= ~(mpu.CARRY)
        mpu.p &= ~(mpu.ZERO)
        mpu.x = 0x03
        self._write(mpu.memory, 0x0000, (0x36, 0x10))
        # $0000 ROL $0010,X
        mpu.memory[0x0010 + mpu.x] = 0x80
        mpu.step()
        self.assertEqual(0x0002, mpu.pc)
        self.assertEqual(0x00, mpu.memory[0x0010 + mpu.x])
        self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
        self.assertEqual(0, mpu.p & mpu.NEGATIVE)
    def test_rol_zp_x_indexed_zero_and_carry_one_clears_z_flag(self):
        """ROL $10,X: 0x00 with C=1 rotates carry in, giving 0x01; Z and N clear."""
        mpu = self._make_mpu()
        mpu.x = 0x03
        mpu.p |= mpu.CARRY
        self._write(mpu.memory, 0x0000, (0x36, 0x10))
        # $0000 ROL $0010,X
        mpu.memory[0x0010 + mpu.x] = 0x00
        mpu.step()
        self.assertEqual(0x0002, mpu.pc)
        self.assertEqual(0x01, mpu.memory[0x0010 + mpu.x])
        self.assertEqual(0, mpu.p & mpu.ZERO)
        self.assertEqual(0, mpu.p & mpu.NEGATIVE)
    def test_rol_zp_x_indexed_sets_n_flag(self):
        """ROL $10,X: 0x40 with C=1 becomes 0x81, setting N and clearing Z."""
        mpu = self._make_mpu()
        mpu.x = 0x03
        mpu.p |= mpu.CARRY
        # $0000 ROL $0010,X
        self._write(mpu.memory, 0x0000, (0x36, 0x10))
        mpu.memory[0x0010 + mpu.x] = 0x40
        mpu.step()
        self.assertEqual(0x0002, mpu.pc)
        self.assertEqual(0x81, mpu.memory[0x0010 + mpu.x])
        self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
        self.assertEqual(0, mpu.p & mpu.ZERO)
    def test_rol_zp_x_indexed_shifts_out_zero(self):
        """ROL $10,X: bit 7 of 0x7F is 0, so carry stays clear after the rotate."""
        mpu = self._make_mpu()
        mpu.x = 0x03
        mpu.p &= ~(mpu.CARRY)
        # $0000 ROL $0010,X
        self._write(mpu.memory, 0x0000, (0x36, 0x10))
        mpu.memory[0x0010 + mpu.x] = 0x7F
        mpu.step()
        self.assertEqual(0x0002, mpu.pc)
        self.assertEqual(0xFE, mpu.memory[0x0010 + mpu.x])
        self.assertEqual(0, mpu.p & mpu.CARRY)
    def test_rol_zp_x_indexed_shifts_out_one(self):
        """ROL $10,X: bit 7 of 0xFF shifts out into carry, setting C."""
        mpu = self._make_mpu()
        mpu.x = 0x03
        mpu.p &= ~(mpu.CARRY)
        # $0000 ROL $0010,X
        self._write(mpu.memory, 0x0000, (0x36, 0x10))
        mpu.memory[0x0010 + mpu.x] = 0xFF
        mpu.step()
        self.assertEqual(0x0002, mpu.pc)
        self.assertEqual(0xFE, mpu.memory[0x0010 + mpu.x])
        self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# ROR Accumulator
    def test_ror_accumulator_zero_and_carry_zero_sets_z_flag(self):
        """ROR A: 0x00 with C=0 stays 0x00, setting Z, clearing N."""
        mpu = self._make_mpu()
        mpu.a = 0x00
        mpu.p &= ~(mpu.CARRY)
        # $0000 ROR A
        mpu.memory[0x0000] = 0x6A
        mpu.step()
        self.assertEqual(0x0001, mpu.pc)
        self.assertEqual(0x00, mpu.a)
        self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
        self.assertEqual(0, mpu.p & mpu.NEGATIVE)
    def test_ror_accumulator_zero_and_carry_one_rotates_in_sets_n_flags(self):
        """ROR A: 0x00 with C=1 rotates carry into bit 7, giving 0x80 and N set."""
        mpu = self._make_mpu()
        mpu.a = 0x00
        mpu.p |= mpu.CARRY
        # $0000 ROR A
        mpu.memory[0x0000] = 0x6A
        mpu.step()
        self.assertEqual(0x0001, mpu.pc)
        self.assertEqual(0x80, mpu.a)
        self.assertEqual(0, mpu.p & mpu.ZERO)
        self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
    def test_ror_accumulator_shifts_out_zero(self):
        """ROR A: bit 0 of 0x02 is 0, so carry ends clear; carry-in lands in bit 7."""
        mpu = self._make_mpu()
        mpu.a = 0x02
        mpu.p |= mpu.CARRY
        # $0000 ROR A
        mpu.memory[0x0000] = 0x6A
        mpu.step()
        self.assertEqual(0x0001, mpu.pc)
        self.assertEqual(0x81, mpu.a)
        self.assertEqual(0, mpu.p & mpu.CARRY)
    def test_ror_accumulator_shifts_out_one(self):
        """ROR A: bit 0 of 0x03 shifts out into carry, setting C."""
        mpu = self._make_mpu()
        mpu.a = 0x03
        mpu.p |= mpu.CARRY
        # $0000 ROR A
        mpu.memory[0x0000] = 0x6A
        mpu.step()
        self.assertEqual(0x0001, mpu.pc)
        self.assertEqual(0x81, mpu.a)
        self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# ROR Absolute
    def test_ror_absolute_zero_and_carry_zero_sets_z_flag(self):
        """ROR $ABCD: 0x00 with C=0 stays 0x00, setting Z, clearing N."""
        mpu = self._make_mpu()
        mpu.p &= ~(mpu.CARRY)
        # $0000 ROR $ABCD
        self._write(mpu.memory, 0x0000, (0x6E, 0xCD, 0xAB))
        mpu.memory[0xABCD] = 0x00
        mpu.step()
        self.assertEqual(0x0003, mpu.pc)
        self.assertEqual(0x00, mpu.memory[0xABCD])
        self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
        self.assertEqual(0, mpu.p & mpu.NEGATIVE)
    def test_ror_absolute_zero_and_carry_one_rotates_in_sets_n_flags(self):
        """ROR $ABCD: 0x00 with C=1 rotates carry into bit 7, giving 0x80 and N set."""
        mpu = self._make_mpu()
        mpu.p |= mpu.CARRY
        # $0000 ROR $ABCD
        self._write(mpu.memory, 0x0000, (0x6E, 0xCD, 0xAB))
        mpu.memory[0xABCD] = 0x00
        mpu.step()
        self.assertEqual(0x0003, mpu.pc)
        self.assertEqual(0x80, mpu.memory[0xABCD])
        self.assertEqual(0, mpu.p & mpu.ZERO)
        self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
    def test_ror_absolute_shifts_out_zero(self):
        """ROR $ABCD: bit 0 of 0x02 is 0, so carry ends clear."""
        mpu = self._make_mpu()
        mpu.p |= mpu.CARRY
        # $0000 ROR $ABCD
        self._write(mpu.memory, 0x0000, (0x6E, 0xCD, 0xAB))
        mpu.memory[0xABCD] = 0x02
        mpu.step()
        self.assertEqual(0x0003, mpu.pc)
        self.assertEqual(0x81, mpu.memory[0xABCD])
        self.assertEqual(0, mpu.p & mpu.CARRY)
    def test_ror_absolute_shifts_out_one(self):
        """ROR $ABCD: bit 0 of 0x03 shifts out into carry, setting C."""
        mpu = self._make_mpu()
        mpu.p |= mpu.CARRY
        # $0000 ROR $ABCD
        self._write(mpu.memory, 0x0000, (0x6E, 0xCD, 0xAB))
        mpu.memory[0xABCD] = 0x03
        mpu.step()
        self.assertEqual(0x0003, mpu.pc)
        self.assertEqual(0x81, mpu.memory[0xABCD])
        self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# ROR Zero Page
    def test_ror_zp_zero_and_carry_zero_sets_z_flag(self):
        """ROR $10 (zero page): 0x00 with C=0 stays 0x00, setting Z, clearing N."""
        mpu = self._make_mpu()
        mpu.p &= ~(mpu.CARRY)
        # $0000 ROR $0010
        self._write(mpu.memory, 0x0000, (0x66, 0x10))
        mpu.memory[0x0010] = 0x00
        mpu.step()
        self.assertEqual(0x0002, mpu.pc)
        self.assertEqual(0x00, mpu.memory[0x0010])
        self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
        self.assertEqual(0, mpu.p & mpu.NEGATIVE)
    def test_ror_zp_zero_and_carry_one_rotates_in_sets_n_flags(self):
        """ROR $10: 0x00 with C=1 rotates carry into bit 7, giving 0x80 and N set."""
        mpu = self._make_mpu()
        mpu.p |= mpu.CARRY
        # $0000 ROR $0010
        self._write(mpu.memory, 0x0000, (0x66, 0x10))
        mpu.memory[0x0010] = 0x00
        mpu.step()
        self.assertEqual(0x0002, mpu.pc)
        self.assertEqual(0x80, mpu.memory[0x0010])
        self.assertEqual(0, mpu.p & mpu.ZERO)
        self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
    def test_ror_zp_zero_absolute_shifts_out_zero(self):
        """ROR $10: bit 0 of 0x02 is 0, so carry ends clear."""
        mpu = self._make_mpu()
        mpu.p |= mpu.CARRY
        # $0000 ROR $0010
        self._write(mpu.memory, 0x0000, (0x66, 0x10))
        mpu.memory[0x0010] = 0x02
        mpu.step()
        self.assertEqual(0x0002, mpu.pc)
        self.assertEqual(0x81, mpu.memory[0x0010])
        self.assertEqual(0, mpu.p & mpu.CARRY)
    def test_ror_zp_shifts_out_one(self):
        """ROR $10: bit 0 of 0x03 shifts out into carry, setting C."""
        mpu = self._make_mpu()
        mpu.p |= mpu.CARRY
        # $0000 ROR $0010
        self._write(mpu.memory, 0x0000, (0x66, 0x10))
        mpu.memory[0x0010] = 0x03
        mpu.step()
        self.assertEqual(0x0002, mpu.pc)
        self.assertEqual(0x81, mpu.memory[0x0010])
        self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# ROR Absolute, X-Indexed
    def test_ror_abs_x_indexed_zero_and_carry_zero_sets_z_flag(self):
        """ROR $ABCD,X: 0x00 with C=0 stays 0x00, setting Z, clearing N."""
        mpu = self._make_mpu()
        mpu.x = 0x03
        mpu.p &= ~(mpu.CARRY)
        # $0000 ROR $ABCD,X
        self._write(mpu.memory, 0x0000, (0x7E, 0xCD, 0xAB))
        mpu.memory[0xABCD + mpu.x] = 0x00
        mpu.step()
        self.assertEqual(0x0003, mpu.pc)
        self.assertEqual(0x00, mpu.memory[0xABCD + mpu.x])
        self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
        self.assertEqual(0, mpu.p & mpu.NEGATIVE)
    def test_ror_abs_x_indexed_z_and_c_1_rotates_in_sets_n_flags(self):
        """ROR $ABCD,X: 0x00 with C=1 rotates carry into bit 7, giving 0x80 and N set."""
        mpu = self._make_mpu()
        mpu.x = 0x03
        mpu.p |= mpu.CARRY
        # $0000 ROR $ABCD,X
        self._write(mpu.memory, 0x0000, (0x7E, 0xCD, 0xAB))
        mpu.memory[0xABCD + mpu.x] = 0x00
        mpu.step()
        self.assertEqual(0x0003, mpu.pc)
        self.assertEqual(0x80, mpu.memory[0xABCD + mpu.x])
        self.assertEqual(0, mpu.p & mpu.ZERO)
        self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
    def test_ror_abs_x_indexed_shifts_out_zero(self):
        """ROR $ABCD,X: bit 0 of 0x02 is 0, so carry ends clear."""
        mpu = self._make_mpu()
        mpu.x = 0x03
        mpu.p |= mpu.CARRY
        # $0000 ROR $ABCD,X
        self._write(mpu.memory, 0x0000, (0x7E, 0xCD, 0xAB))
        mpu.memory[0xABCD + mpu.x] = 0x02
        mpu.step()
        self.assertEqual(0x0003, mpu.pc)
        self.assertEqual(0x81, mpu.memory[0xABCD + mpu.x])
        self.assertEqual(0, mpu.p & mpu.CARRY)
    def test_ror_abs_x_indexed_shifts_out_one(self):
        """ROR $ABCD,X: bit 0 of 0x03 shifts out into carry, setting C."""
        mpu = self._make_mpu()
        mpu.x = 0x03
        mpu.p |= mpu.CARRY
        # $0000 ROR $ABCD,X
        self._write(mpu.memory, 0x0000, (0x7E, 0xCD, 0xAB))
        mpu.memory[0xABCD + mpu.x] = 0x03
        mpu.step()
        self.assertEqual(0x0003, mpu.pc)
        self.assertEqual(0x81, mpu.memory[0xABCD + mpu.x])
        self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# ROR Zero Page, X-Indexed
    def test_ror_zp_x_indexed_zero_and_carry_zero_sets_z_flag(self):
        """ROR $10,X: 0x00 with C=0 stays 0x00, setting Z, clearing N."""
        mpu = self._make_mpu()
        mpu.x = 0x03
        mpu.p &= ~(mpu.CARRY)
        # $0000 ROR $0010,X
        self._write(mpu.memory, 0x0000, (0x76, 0x10))
        mpu.memory[0x0010 + mpu.x] = 0x00
        mpu.step()
        self.assertEqual(0x0002, mpu.pc)
        self.assertEqual(0x00, mpu.memory[0x0010 + mpu.x])
        self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
        self.assertEqual(0, mpu.p & mpu.NEGATIVE)
    def test_ror_zp_x_indexed_zero_and_carry_one_rotates_in_sets_n_flags(self):
        """ROR $10,X: 0x00 with C=1 rotates carry into bit 7, giving 0x80 and N set."""
        mpu = self._make_mpu()
        mpu.x = 0x03
        mpu.p |= mpu.CARRY
        # $0000 ROR $0010,X
        self._write(mpu.memory, 0x0000, (0x76, 0x10))
        mpu.memory[0x0010 + mpu.x] = 0x00
        mpu.step()
        self.assertEqual(0x0002, mpu.pc)
        self.assertEqual(0x80, mpu.memory[0x0010 + mpu.x])
        self.assertEqual(0, mpu.p & mpu.ZERO)
        self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
    def test_ror_zp_x_indexed_zero_absolute_shifts_out_zero(self):
        """ROR $10,X: bit 0 of 0x02 is 0, so carry ends clear."""
        mpu = self._make_mpu()
        mpu.x = 0x03
        mpu.p |= mpu.CARRY
        # $0000 ROR $0010,X
        self._write(mpu.memory, 0x0000, (0x76, 0x10))
        mpu.memory[0x0010 + mpu.x] = 0x02
        mpu.step()
        self.assertEqual(0x0002, mpu.pc)
        self.assertEqual(0x81, mpu.memory[0x0010 + mpu.x])
        self.assertEqual(0, mpu.p & mpu.CARRY)
    def test_ror_zp_x_indexed_shifts_out_one(self):
        """ROR $10,X: bit 0 of 0x03 shifts out into carry, setting C."""
        mpu = self._make_mpu()
        mpu.x = 0x03
        mpu.p |= mpu.CARRY
        # $0000 ROR $0010,X
        self._write(mpu.memory, 0x0000, (0x76, 0x10))
        mpu.memory[0x0010 + mpu.x] = 0x03
        mpu.step()
        self.assertEqual(0x0002, mpu.pc)
        self.assertEqual(0x81, mpu.memory[0x0010 + mpu.x])
        self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# RTI
    def test_rti_restores_status_and_pc_and_updates_sp(self):
        """RTI pops P then PCL/PCH from the stack and leaves SP three higher."""
        mpu = self._make_mpu()
        # $0000 RTI
        mpu.memory[0x0000] = 0x40
        self._write(mpu.memory, 0x01FD, (0xFC, 0x03, 0xC0))  # Status, PCL, PCH
        mpu.sp = 0xFC
        mpu.step()
        self.assertEqual(0xC003, mpu.pc)
        self.assertEqual(0xFC, mpu.p)
        self.assertEqual(0xFF, mpu.sp)
    def test_rti_forces_break_and_unused_flags_high(self):
        """RTI with a popped status of 0x00 still ends with B and the unused bit set."""
        mpu = self._make_mpu()
        # $0000 RTI
        mpu.memory[0x0000] = 0x40
        self._write(mpu.memory, 0x01FD, (0x00, 0x03, 0xC0))  # Status, PCL, PCH
        mpu.sp = 0xFC
        mpu.step()
        self.assertEqual(mpu.BREAK, mpu.p & mpu.BREAK)
        self.assertEqual(mpu.UNUSED, mpu.p & mpu.UNUSED)
# RTS
    def test_rts_restores_pc_and_increments_then_updates_sp(self):
        """RTS pops the return address, adds one (0xC003 -> 0xC004), and bumps SP by two."""
        mpu = self._make_mpu()
        # $0000 RTS
        mpu.memory[0x0000] = 0x60
        self._write(mpu.memory, 0x01FE, (0x03, 0xC0))  # PCL, PCH
        mpu.pc = 0x0000
        mpu.sp = 0xFD
        mpu.step()
        self.assertEqual(0xC004, mpu.pc)
        self.assertEqual(0xFF, mpu.sp)
    def test_rts_wraps_around_top_of_memory(self):
        """RTS with a popped address of 0xFFFF increments and wraps the PC to 0x0000."""
        mpu = self._make_mpu()
        # $1000 RTS
        mpu.memory[0x1000] = 0x60
        self._write(mpu.memory, 0x01FE, (0xFF, 0xFF))  # PCL, PCH
        mpu.pc = 0x1000
        mpu.sp = 0xFD
        mpu.step()
        self.assertEqual(0x0000, mpu.pc)
        self.assertEqual(0xFF, mpu.sp)
# SBC Absolute
def test_sbc_abs_all_zeros_and_no_borrow_is_zero(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p |= mpu.CARRY # borrow = 0
mpu.a = 0x00
# $0000 SBC $ABCD
self._write(mpu.memory, 0x0000, (0xED, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_abs_downto_zero_no_borrow_sets_z_clears_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p |= mpu.CARRY # borrow = 0
mpu.a = 0x01
# $0000 SBC $ABCD
self._write(mpu.memory, 0x0000, (0xED, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x01
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_abs_downto_zero_with_borrow_sets_z_clears_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p &= ~(mpu.CARRY) # borrow = 1
mpu.a = 0x01
# $0000 SBC $ABCD
self._write(mpu.memory, 0x0000, (0xED, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_abs_downto_four_with_borrow_clears_z_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p &= ~(mpu.CARRY) # borrow = 1
mpu.a = 0x07
# $0000 SBC $ABCD
self._write(mpu.memory, 0x0000, (0xED, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x02
mpu.step()
self.assertEqual(0x04, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.CARRY, mpu.CARRY)
# SBC Zero Page
def test_sbc_zp_all_zeros_and_no_borrow_is_zero(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p |= mpu.CARRY # borrow = 0
mpu.a = 0x00
# $0000 SBC $10
self._write(mpu.memory, 0x0000, (0xE5, 0x10))
mpu.memory[0x0010] = 0x00
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_zp_downto_zero_no_borrow_sets_z_clears_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p |= mpu.CARRY # borrow = 0
mpu.a = 0x01
# $0000 SBC $10
self._write(mpu.memory, 0x0000, (0xE5, 0x10))
mpu.memory[0x0010] = 0x01
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_zp_downto_zero_with_borrow_sets_z_clears_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p &= ~(mpu.CARRY) # borrow = 1
mpu.a = 0x01
# => SBC $10
self._write(mpu.memory, 0x0000, (0xE5, 0x10))
mpu.memory[0x0010] = 0x00
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_zp_downto_four_with_borrow_clears_z_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p &= ~(mpu.CARRY) # borrow = 1
mpu.a = 0x07
# => SBC $10
self._write(mpu.memory, 0x0000, (0xE5, 0x10))
mpu.memory[0x0010] = 0x02
mpu.step()
self.assertEqual(0x04, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.CARRY, mpu.CARRY)
# SBC Immediate
def test_sbc_imm_all_zeros_and_no_borrow_is_zero(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p |= mpu.CARRY # borrow = 0
mpu.a = 0x00
# $0000 SBC #$00
self._write(mpu.memory, 0x0000, (0xE9, 0x00))
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_imm_downto_zero_no_borrow_sets_z_clears_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p |= mpu.CARRY # borrow = 0
mpu.a = 0x01
# $0000 SBC #$01
self._write(mpu.memory, 0x0000, (0xE9, 0x01))
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_imm_downto_zero_with_borrow_sets_z_clears_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p &= ~(mpu.CARRY) # borrow = 1
mpu.a = 0x01
# $0000 SBC #$00
self._write(mpu.memory, 0x0000, (0xE9, 0x00))
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_imm_downto_four_with_borrow_clears_z_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p &= ~(mpu.CARRY) # borrow = 1
mpu.a = 0x07
# $0000 SBC #$02
self._write(mpu.memory, 0x0000, (0xE9, 0x02))
mpu.step()
self.assertEqual(0x04, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.CARRY, mpu.CARRY)
    def test_sbc_bcd_on_immediate_0a_minus_00_carry_set(self):
        """Decimal-mode SBC #$00: A=0x0a is unchanged; N, V, Z clear, C set."""
        mpu = self._make_mpu()
        mpu.p |= mpu.DECIMAL
        mpu.p |= mpu.CARRY
        mpu.a = 0x0a
        # $0000 SBC #$00
        self._write(mpu.memory, 0x0000, (0xe9, 0x00))
        mpu.step()
        self.assertEqual(0x0002, mpu.pc)
        self.assertEqual(0x0a, mpu.a)
        self.assertEqual(0, mpu.p & mpu.NEGATIVE)
        self.assertEqual(0, mpu.p & mpu.OVERFLOW)
        self.assertEqual(0, mpu.p & mpu.ZERO)
        self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
    def test_sbc_bcd_on_immediate_9a_minus_00_carry_set(self):
        """Decimal-mode SBC #$00: A=0x9a is unchanged; N set, V and Z clear, C set."""
        mpu = self._make_mpu()
        mpu.p |= mpu.DECIMAL
        mpu.p |= mpu.CARRY
        mpu.a = 0x9a
        # $0000 SBC #$00
        self._write(mpu.memory, 0x0000, (0xe9, 0x00))
        mpu.step()
        self.assertEqual(0x0002, mpu.pc)
        self.assertEqual(0x9a, mpu.a)
        self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
        self.assertEqual(0, mpu.p & mpu.OVERFLOW)
        self.assertEqual(0, mpu.p & mpu.ZERO)
        self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
def test_sbc_bcd_on_immediate_00_minus_01_carry_set(self):
mpu = self._make_mpu()
mpu.p |= mpu.DECIMAL
mpu.p |= mpu.OVERFLOW
mpu.p |= mpu.ZERO
mpu.p |= mpu.CARRY
mpu.a = 0x00
# => $0000 SBC #$00
self._write(mpu.memory, 0x0000, (0xe9, 0x01))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x99, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.OVERFLOW)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.CARRY)
def test_sbc_bcd_on_immediate_20_minus_0a_carry_unset(self):
mpu = self._make_mpu()
mpu.p |= mpu.DECIMAL
mpu.a = 0x20
# $0000 SBC #$00
self._write(mpu.memory, 0x0000, (0xe9, 0x0a))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x1f, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.OVERFLOW)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# SBC Absolute, X-Indexed
def test_sbc_abs_x_all_zeros_and_no_borrow_is_zero(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p |= mpu.CARRY # borrow = 0
mpu.a = 0x00
# $0000 SBC $FEE0,X
self._write(mpu.memory, 0x0000, (0xFD, 0xE0, 0xFE))
mpu.x = 0x0D
mpu.memory[0xFEED] = 0x00
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_abs_x_downto_zero_no_borrow_sets_z_clears_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p |= mpu.CARRY # borrow = 0
mpu.a = 0x01
# $0000 SBC $FEE0,X
self._write(mpu.memory, 0x0000, (0xFD, 0xE0, 0xFE))
mpu.x = 0x0D
mpu.memory[0xFEED] = 0x01
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_abs_x_downto_zero_with_borrow_sets_z_clears_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p &= ~(mpu.CARRY) # borrow = 1
mpu.a = 0x01
# $0000 SBC $FEE0,X
self._write(mpu.memory, 0x0000, (0xFD, 0xE0, 0xFE))
mpu.x = 0x0D
mpu.memory[0xFEED] = 0x00
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_abs_x_downto_four_with_borrow_clears_z_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p &= ~(mpu.CARRY) # borrow = 1
mpu.a = 0x07
# $0000 SBC $FEE0,X
self._write(mpu.memory, 0x0000, (0xFD, 0xE0, 0xFE))
mpu.x = 0x0D
mpu.memory[0xFEED] = 0x02
mpu.step()
self.assertEqual(0x04, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.CARRY, mpu.CARRY)
# SBC Absolute, Y-Indexed
def test_sbc_abs_y_all_zeros_and_no_borrow_is_zero(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p |= mpu.CARRY # borrow = 0
mpu.a = 0x00
# $0000 SBC $FEE0,Y
self._write(mpu.memory, 0x0000, (0xF9, 0xE0, 0xFE))
mpu.y = 0x0D
mpu.memory[0xFEED] = 0x00
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_abs_y_downto_zero_no_borrow_sets_z_clears_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p |= mpu.CARRY # borrow = 0
mpu.a = 0x01
# $0000 SBC $FEE0,Y
self._write(mpu.memory, 0x0000, (0xF9, 0xE0, 0xFE))
mpu.y = 0x0D
mpu.memory[0xFEED] = 0x01
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_abs_y_downto_zero_with_borrow_sets_z_clears_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p &= ~(mpu.CARRY) # borrow = 1
mpu.a = 0x01
# $0000 SBC $FEE0,Y
self._write(mpu.memory, 0x0000, (0xF9, 0xE0, 0xFE))
mpu.y = 0x0D
mpu.memory[0xFEED] = 0x00
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_abs_y_downto_four_with_borrow_clears_z_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p &= ~(mpu.CARRY) # borrow = 1
mpu.a = 0x07
# $0000 SBC $FEE0,Y
self._write(mpu.memory, 0x0000, (0xF9, 0xE0, 0xFE))
mpu.y = 0x0D
mpu.memory[0xFEED] = 0x02
mpu.step()
self.assertEqual(0x04, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.CARRY, mpu.CARRY)
# SBC Indirect, Indexed (X)
def test_sbc_ind_x_all_zeros_and_no_borrow_is_zero(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p |= mpu.CARRY # borrow = 0
mpu.a = 0x00
# $0000 SBC ($10,X)
# $0013 Vector to $FEED
self._write(mpu.memory, 0x0000, (0xE1, 0x10))
self._write(mpu.memory, 0x0013, (0xED, 0xFE))
mpu.x = 0x03
mpu.memory[0xFEED] = 0x00
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_ind_x_downto_zero_no_borrow_sets_z_clears_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p |= mpu.CARRY # borrow = 0
mpu.a = 0x01
# $0000 SBC ($10,X)
# $0013 Vector to $FEED
self._write(mpu.memory, 0x0000, (0xE1, 0x10))
self._write(mpu.memory, 0x0013, (0xED, 0xFE))
mpu.x = 0x03
mpu.memory[0xFEED] = 0x01
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_ind_x_downto_zero_with_borrow_sets_z_clears_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p &= ~(mpu.CARRY) # borrow = 1
mpu.a = 0x01
# $0000 SBC ($10,X)
# $0013 Vector to $FEED
self._write(mpu.memory, 0x0000, (0xE1, 0x10))
self._write(mpu.memory, 0x0013, (0xED, 0xFE))
mpu.x = 0x03
mpu.memory[0xFEED] = 0x00
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_ind_x_downto_four_with_borrow_clears_z_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p &= ~(mpu.CARRY) # borrow = 1
mpu.a = 0x07
# $0000 SBC ($10,X)
# $0013 Vector to $FEED
self._write(mpu.memory, 0x0000, (0xE1, 0x10))
self._write(mpu.memory, 0x0013, (0xED, 0xFE))
mpu.x = 0x03
mpu.memory[0xFEED] = 0x02
mpu.step()
self.assertEqual(0x04, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.CARRY, mpu.CARRY)
# SBC Indexed, Indirect (Y)
def test_sbc_ind_y_all_zeros_and_no_borrow_is_zero(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p |= mpu.CARRY # borrow = 0
mpu.a = 0x00
mpu.y = 0x03
# $0000 SBC ($10),Y
# $0010 Vector to $FEED
self._write(mpu.memory, 0x0000, (0xF1, 0x10))
self._write(mpu.memory, 0x0010, (0xED, 0xFE))
mpu.memory[0xFEED + mpu.y] = 0x00
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_ind_y_downto_zero_no_borrow_sets_z_clears_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p |= mpu.CARRY # borrow = 0
mpu.a = 0x01
# $0000 SBC ($10),Y
# $0010 Vector to $FEED
self._write(mpu.memory, 0x0000, (0xF1, 0x10))
self._write(mpu.memory, 0x0010, (0xED, 0xFE))
mpu.memory[0xFEED + mpu.y] = 0x01
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_ind_y_downto_zero_with_borrow_sets_z_clears_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p &= ~(mpu.CARRY) # borrow = 1
mpu.a = 0x01
# $0000 SBC ($10),Y
# $0010 Vector to $FEED
self._write(mpu.memory, 0x0000, (0xF1, 0x10))
self._write(mpu.memory, 0x0010, (0xED, 0xFE))
mpu.memory[0xFEED + mpu.y] = 0x00
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_ind_y_downto_four_with_borrow_clears_z_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p &= ~(mpu.CARRY) # borrow = 1
mpu.a = 0x07
# $0000 SBC ($10),Y
# $0010 Vector to $FEED
self._write(mpu.memory, 0x0000, (0xF1, 0x10))
self._write(mpu.memory, 0x0010, (0xED, 0xFE))
mpu.memory[0xFEED + mpu.y] = 0x02
mpu.step()
self.assertEqual(0x04, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.CARRY, mpu.CARRY)
# SBC Zero Page, X-Indexed
def test_sbc_zp_x_all_zeros_and_no_borrow_is_zero(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p |= mpu.CARRY # borrow = 0
mpu.a = 0x00
# $0000 SBC $10,X
self._write(mpu.memory, 0x0000, (0xF5, 0x10))
mpu.x = 0x0D
mpu.memory[0x001D] = 0x00
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_zp_x_downto_zero_no_borrow_sets_z_clears_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p |= mpu.CARRY # borrow = 0
mpu.a = 0x01
# $0000 SBC $10,X
self._write(mpu.memory, 0x0000, (0xF5, 0x10))
mpu.x = 0x0D
mpu.memory[0x001D] = 0x01
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_zp_x_downto_zero_with_borrow_sets_z_clears_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p &= ~(mpu.CARRY) # borrow = 1
mpu.a = 0x01
# $0000 SBC $10,X
self._write(mpu.memory, 0x0000, (0xF5, 0x10))
mpu.x = 0x0D
mpu.memory[0x001D] = 0x00
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_zp_x_downto_four_with_borrow_clears_z_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p &= ~(mpu.CARRY) # borrow = 1
mpu.a = 0x07
# $0000 SBC $10,X
self._write(mpu.memory, 0x0000, (0xF5, 0x10))
mpu.x = 0x0D
mpu.memory[0x001D] = 0x02
mpu.step()
self.assertEqual(0x04, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.CARRY, mpu.CARRY)
# SEC
def test_sec_sets_carry_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
# $0000 SEC
mpu.memory[0x0000] = 0x038
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# SED
    def test_sed_sets_decimal_mode_flag(self):
        """SED (0xF8) sets the decimal-mode flag and advances PC by one."""
        mpu = self._make_mpu()
        mpu.p &= ~(mpu.DECIMAL)
        # $0000 SED
        mpu.memory[0x0000] = 0xF8
        mpu.step()
        self.assertEqual(0x0001, mpu.pc)
        self.assertEqual(mpu.DECIMAL, mpu.p & mpu.DECIMAL)
# SEI
    def test_sei_sets_interrupt_disable_flag(self):
        """SEI (0x78) sets the interrupt-disable flag and advances PC by one."""
        mpu = self._make_mpu()
        mpu.p &= ~(mpu.INTERRUPT)
        # $0000 SEI
        mpu.memory[0x0000] = 0x78
        mpu.step()
        self.assertEqual(0x0001, mpu.pc)
        self.assertEqual(mpu.INTERRUPT, mpu.p & mpu.INTERRUPT)
# STA Absolute
    def test_sta_absolute_stores_a_leaves_a_and_n_flag_unchanged(self):
        """STA $ABCD writes A (0xFF) to memory without touching A or any flags."""
        mpu = self._make_mpu()
        mpu.p = flags = 0xFF & ~(mpu.NEGATIVE)
        mpu.a = 0xFF
        # $0000 STA $ABCD
        self._write(mpu.memory, 0x0000, (0x8D, 0xCD, 0xAB))
        mpu.memory[0xABCD] = 0x00
        mpu.step()
        self.assertEqual(0x0003, mpu.pc)
        self.assertEqual(0xFF, mpu.memory[0xABCD])
        self.assertEqual(0xFF, mpu.a)
        self.assertEqual(flags, mpu.p)
    def test_sta_absolute_stores_a_leaves_a_and_z_flag_unchanged(self):
        """STA $ABCD writes A (0x00) to memory without touching A or any flags."""
        mpu = self._make_mpu()
        mpu.p = flags = 0xFF & ~(mpu.ZERO)
        mpu.a = 0x00
        # $0000 STA $ABCD
        self._write(mpu.memory, 0x0000, (0x8D, 0xCD, 0xAB))
        mpu.memory[0xABCD] = 0xFF
        mpu.step()
        self.assertEqual(0x0003, mpu.pc)
        self.assertEqual(0x00, mpu.memory[0xABCD])
        self.assertEqual(0x00, mpu.a)
        self.assertEqual(flags, mpu.p)
# STA Zero Page
def test_sta_zp_stores_a_leaves_a_and_n_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.NEGATIVE)
mpu.a = 0xFF
# $0000 STA $0010
self._write(mpu.memory, 0x0000, (0x85, 0x10))
mpu.memory[0x0010] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.memory[0x0010])
self.assertEqual(0xFF, mpu.a)
self.assertEqual(flags, mpu.p)
def test_sta_zp_stores_a_leaves_a_and_z_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.ZERO)
mpu.a = 0x00
# $0000 STA $0010
self._write(mpu.memory, 0x0000, (0x85, 0x10))
mpu.memory[0x0010] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010])
self.assertEqual(0x00, mpu.a)
self.assertEqual(flags, mpu.p)
# STA Absolute, X-Indexed
def test_sta_abs_x_indexed_stores_a_leaves_a_and_n_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.NEGATIVE)
mpu.a = 0xFF
mpu.x = 0x03
# $0000 STA $ABCD,X
self._write(mpu.memory, 0x0000, (0x9D, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xFF, mpu.memory[0xABCD + mpu.x])
self.assertEqual(0xFF, mpu.a)
self.assertEqual(flags, mpu.p)
def test_sta_abs_x_indexed_stores_a_leaves_a_and_z_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.ZERO)
mpu.a = 0x00
mpu.x = 0x03
# $0000 STA $ABCD,X
self._write(mpu.memory, 0x0000, (0x9D, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xABCD + mpu.x])
self.assertEqual(0x00, mpu.a)
self.assertEqual(flags, mpu.p)
# STA Absolute, Y-Indexed
def test_sta_abs_y_indexed_stores_a_leaves_a_and_n_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.NEGATIVE)
mpu.a = 0xFF
mpu.y = 0x03
# $0000 STA $ABCD,Y
self._write(mpu.memory, 0x0000, (0x99, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xFF, mpu.memory[0xABCD + mpu.y])
self.assertEqual(0xFF, mpu.a)
self.assertEqual(flags, mpu.p)
def test_sta_abs_y_indexed_stores_a_leaves_a_and_z_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.ZERO)
mpu.a = 0x00
mpu.y = 0x03
# $0000 STA $ABCD,Y
self._write(mpu.memory, 0x0000, (0x99, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xABCD + mpu.y])
self.assertEqual(0x00, mpu.a)
self.assertEqual(flags, mpu.p)
# STA Indirect, Indexed (X)
def test_sta_ind_indexed_x_stores_a_leaves_a_and_n_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.NEGATIVE)
mpu.a = 0xFF
mpu.x = 0x03
# $0000 STA ($0010,X)
# $0013 Vector to $FEED
self._write(mpu.memory, 0x0000, (0x81, 0x10))
self._write(mpu.memory, 0x0013, (0xED, 0xFE))
mpu.memory[0xFEED] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.memory[0xFEED])
self.assertEqual(0xFF, mpu.a)
self.assertEqual(flags, mpu.p)
def test_sta_ind_indexed_x_stores_a_leaves_a_and_z_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.ZERO)
mpu.a = 0x00
mpu.x = 0x03
# $0000 STA ($0010,X)
# $0013 Vector to $FEED
self._write(mpu.memory, 0x0000, (0x81, 0x10))
self._write(mpu.memory, 0x0013, (0xED, 0xFE))
mpu.memory[0xFEED] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xFEED])
self.assertEqual(0x00, mpu.a)
self.assertEqual(flags, mpu.p)
# STA Indexed, Indirect (Y)
def test_sta_indexed_ind_y_stores_a_leaves_a_and_n_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.NEGATIVE)
mpu.a = 0xFF
mpu.y = 0x03
# $0000 STA ($0010),Y
# $0010 Vector to $FEED
self._write(mpu.memory, 0x0000, (0x91, 0x10))
self._write(mpu.memory, 0x0010, (0xED, 0xFE))
mpu.memory[0xFEED + mpu.y] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.memory[0xFEED + mpu.y])
self.assertEqual(0xFF, mpu.a)
self.assertEqual(flags, mpu.p)
def test_sta_indexed_ind_y_stores_a_leaves_a_and_z_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.ZERO)
mpu.a = 0x00
mpu.y = 0x03
# $0000 STA ($0010),Y
# $0010 Vector to $FEED
self._write(mpu.memory, 0x0000, (0x91, 0x10))
self._write(mpu.memory, 0x0010, (0xED, 0xFE))
mpu.memory[0xFEED + mpu.y] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xFEED + mpu.y])
self.assertEqual(0x00, mpu.a)
self.assertEqual(flags, mpu.p)
# STA Zero Page, X-Indexed
def test_sta_zp_x_indexed_stores_a_leaves_a_and_n_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.NEGATIVE)
mpu.a = 0xFF
mpu.x = 0x03
# $0000 STA $0010,X
self._write(mpu.memory, 0x0000, (0x95, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.memory[0x0010 + mpu.x])
self.assertEqual(0xFF, mpu.a)
self.assertEqual(flags, mpu.p)
def test_sta_zp_x_indexed_stores_a_leaves_a_and_z_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.ZERO)
mpu.a = 0x00
mpu.x = 0x03
# $0000 STA $0010,X
self._write(mpu.memory, 0x0000, (0x95, 0x10))
mpu.memory[0x0010 + mpu.x] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010 + mpu.x])
self.assertEqual(0x00, mpu.a)
self.assertEqual(flags, mpu.p)
# STX Absolute
def test_stx_absolute_stores_x_leaves_x_and_n_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.NEGATIVE)
mpu.x = 0xFF
# $0000 STX $ABCD
self._write(mpu.memory, 0x0000, (0x8E, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xFF, mpu.memory[0xABCD])
self.assertEqual(0xFF, mpu.x)
self.assertEqual(flags, mpu.p)
def test_stx_absolute_stores_x_leaves_x_and_z_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.ZERO)
mpu.x = 0x00
# $0000 STX $ABCD
self._write(mpu.memory, 0x0000, (0x8E, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xABCD])
self.assertEqual(0x00, mpu.x)
self.assertEqual(flags, mpu.p)
# STX Zero Page
def test_stx_zp_stores_x_leaves_x_and_n_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.NEGATIVE)
mpu.x = 0xFF
# $0000 STX $0010
self._write(mpu.memory, 0x0000, (0x86, 0x10))
mpu.memory[0x0010] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.memory[0x0010])
self.assertEqual(0xFF, mpu.x)
self.assertEqual(flags, mpu.p)
def test_stx_zp_stores_x_leaves_x_and_z_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.ZERO)
mpu.x = 0x00
# $0000 STX $0010
self._write(mpu.memory, 0x0000, (0x86, 0x10))
mpu.memory[0x0010] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010])
self.assertEqual(0x00, mpu.x)
self.assertEqual(flags, mpu.p)
# STX Zero Page, Y-Indexed
def test_stx_zp_y_indexed_stores_x_leaves_x_and_n_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.NEGATIVE)
mpu.x = 0xFF
mpu.y = 0x03
# $0000 STX $0010,Y
self._write(mpu.memory, 0x0000, (0x96, 0x10))
mpu.memory[0x0010 + mpu.y] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.memory[0x0010 + mpu.y])
self.assertEqual(0xFF, mpu.x)
self.assertEqual(flags, mpu.p)
def test_stx_zp_y_indexed_stores_x_leaves_x_and_z_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.ZERO)
mpu.x = 0x00
mpu.y = 0x03
# $0000 STX $0010,Y
self._write(mpu.memory, 0x0000, (0x96, 0x10))
mpu.memory[0x0010 + mpu.y] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010 + mpu.y])
self.assertEqual(0x00, mpu.x)
self.assertEqual(flags, mpu.p)
# STY Absolute
def test_sty_absolute_stores_y_leaves_y_and_n_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.NEGATIVE)
mpu.y = 0xFF
# $0000 STY $ABCD
self._write(mpu.memory, 0x0000, (0x8C, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xFF, mpu.memory[0xABCD])
self.assertEqual(0xFF, mpu.y)
self.assertEqual(flags, mpu.p)
def test_sty_absolute_stores_y_leaves_y_and_z_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.ZERO)
mpu.y = 0x00
# $0000 STY $ABCD
self._write(mpu.memory, 0x0000, (0x8C, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xABCD])
self.assertEqual(0x00, mpu.y)
self.assertEqual(flags, mpu.p)
# STY Zero Page
def test_sty_zp_stores_y_leaves_y_and_n_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.NEGATIVE)
mpu.y = 0xFF
# $0000 STY $0010
self._write(mpu.memory, 0x0000, (0x84, 0x10))
mpu.memory[0x0010] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.memory[0x0010])
self.assertEqual(0xFF, mpu.y)
self.assertEqual(flags, mpu.p)
def test_sty_zp_stores_y_leaves_y_and_z_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.ZERO)
mpu.y = 0x00
# $0000 STY $0010
self._write(mpu.memory, 0x0000, (0x84, 0x10))
mpu.memory[0x0010] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010])
self.assertEqual(0x00, mpu.y)
self.assertEqual(flags, mpu.p)
# STY Zero Page, X-Indexed
def test_sty_zp_x_indexed_stores_y_leaves_y_and_n_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.NEGATIVE)
mpu.y = 0xFF
mpu.x = 0x03
# $0000 STY $0010,X
self._write(mpu.memory, 0x0000, (0x94, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.memory[0x0010 + mpu.x])
self.assertEqual(0xFF, mpu.y)
self.assertEqual(flags, mpu.p)
def test_sty_zp_x_indexed_stores_y_leaves_y_and_z_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.ZERO)
mpu.y = 0x00
mpu.x = 0x03
# $0000 STY $0010,X
self._write(mpu.memory, 0x0000, (0x94, 0x10))
mpu.memory[0x0010 + mpu.x] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010 + mpu.x])
self.assertEqual(0x00, mpu.y)
self.assertEqual(flags, mpu.p)
# TAX
def test_tax_transfers_accumulator_into_x(self):
mpu = self._make_mpu()
mpu.a = 0xAB
mpu.x = 0x00
# $0000 TAX
mpu.memory[0x0000] = 0xAA
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0xAB, mpu.a)
self.assertEqual(0xAB, mpu.x)
def test_tax_sets_negative_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.NEGATIVE)
mpu.a = 0x80
mpu.x = 0x00
# $0000 TAX
mpu.memory[0x0000] = 0xAA
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(0x80, mpu.x)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
def test_tax_sets_zero_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.ZERO)
mpu.a = 0x00
mpu.x = 0xFF
# $0000 TAX
mpu.memory[0x0000] = 0xAA
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0x00, mpu.x)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
# TAY
def test_tay_transfers_accumulator_into_y(self):
mpu = self._make_mpu()
mpu.a = 0xAB
mpu.y = 0x00
# $0000 TAY
mpu.memory[0x0000] = 0xA8
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0xAB, mpu.a)
self.assertEqual(0xAB, mpu.y)
def test_tay_sets_negative_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.NEGATIVE)
mpu.a = 0x80
mpu.y = 0x00
# $0000 TAY
mpu.memory[0x0000] = 0xA8
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(0x80, mpu.y)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
def test_tay_sets_zero_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.ZERO)
mpu.a = 0x00
mpu.y = 0xFF
# $0000 TAY
mpu.memory[0x0000] = 0xA8
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0x00, mpu.y)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
# TSX
def test_tsx_transfers_stack_pointer_into_x(self):
mpu = self._make_mpu()
mpu.sp = 0xAB
mpu.x = 0x00
# $0000 TSX
mpu.memory[0x0000] = 0xBA
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0xAB, mpu.sp)
self.assertEqual(0xAB, mpu.x)
def test_tsx_sets_negative_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.NEGATIVE)
mpu.sp = 0x80
mpu.x = 0x00
# $0000 TSX
mpu.memory[0x0000] = 0xBA
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x80, mpu.sp)
self.assertEqual(0x80, mpu.x)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
def test_tsx_sets_zero_flag(self):
    """TSX with SP == 0x00 must load X with zero and set the Z flag."""
    mpu = self._make_mpu()
    mpu.p &= ~(mpu.ZERO)
    mpu.sp = 0x00
    # BUG FIX: the original preset mpu.y, which TSX never touches, so the
    # "x == 0" assertion below passed vacuously.  Preset X to a nonzero
    # value so the test actually proves the SP -> X transfer happened.
    mpu.x = 0xFF
    # $0000 TSX
    mpu.memory[0x0000] = 0xBA
    mpu.step()
    self.assertEqual(0x0001, mpu.pc)
    self.assertEqual(0x00, mpu.sp)
    self.assertEqual(0x00, mpu.x)
    self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
# TXA
def test_txa_transfers_x_into_a(self):
mpu = self._make_mpu()
mpu.x = 0xAB
mpu.a = 0x00
# $0000 TXA
mpu.memory[0x0000] = 0x8A
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0xAB, mpu.a)
self.assertEqual(0xAB, mpu.x)
def test_txa_sets_negative_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.NEGATIVE)
mpu.x = 0x80
mpu.a = 0x00
# $0000 TXA
mpu.memory[0x0000] = 0x8A
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(0x80, mpu.x)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
def test_txa_sets_zero_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.ZERO)
mpu.x = 0x00
mpu.a = 0xFF
# $0000 TXA
mpu.memory[0x0000] = 0x8A
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0x00, mpu.x)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
# TXS
def test_txs_transfers_x_into_stack_pointer(self):
mpu = self._make_mpu()
mpu.x = 0xAB
# $0000 TXS
mpu.memory[0x0000] = 0x9A
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0xAB, mpu.sp)
self.assertEqual(0xAB, mpu.x)
def test_txs_does_not_set_negative_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.NEGATIVE)
mpu.x = 0x80
# $0000 TXS
mpu.memory[0x0000] = 0x9A
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x80, mpu.sp)
self.assertEqual(0x80, mpu.x)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_txs_does_not_set_zero_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.ZERO)
mpu.x = 0x00
# $0000 TXS
mpu.memory[0x0000] = 0x9A
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x00, mpu.sp)
self.assertEqual(0x00, mpu.x)
self.assertEqual(0, mpu.p & mpu.ZERO)
# TYA
def test_tya_transfers_y_into_a(self):
mpu = self._make_mpu()
mpu.y = 0xAB
mpu.a = 0x00
# $0000 TYA
mpu.memory[0x0000] = 0x98
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0xAB, mpu.a)
self.assertEqual(0xAB, mpu.y)
def test_tya_sets_negative_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.NEGATIVE)
mpu.y = 0x80
mpu.a = 0x00
# $0000 TYA
mpu.memory[0x0000] = 0x98
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(0x80, mpu.y)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
def test_tya_sets_zero_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.ZERO)
mpu.y = 0x00
mpu.a = 0xFF
# $0000 TYA
mpu.memory[0x0000] = 0x98
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0x00, mpu.y)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0x0001, mpu.pc)
def test_decorated_addressing_modes_are_valid(self):
    """Every addressing mode the MPU reports for disassembly must be one
    the assembler recognizes, keeping disassembler and assembler in sync."""
    # Addressing is a sequence of (mode, ...) entries; collect the mode keys.
    valid_modes = [x[0] for x in py65.assembler.Assembler.Addressing]
    mpu = self._make_mpu()
    for name, mode in mpu.disassemble:
        self.assertTrue(mode in valid_modes)
def test_brk_interrupt(self):
    """BRK vectors through $FFFE and RTI resumes after the byte following
    BRK (the padding byte at $0003 is skipped)."""
    mpu = self._make_mpu()
    mpu.p = 0x00
    # IRQ/BRK vector at $FFFE points to the handler at $0400
    self._write(mpu.memory, 0xFFFE, (0x00, 0x04))
    self._write(mpu.memory, 0x0000, (0xA9, 0x01, # LDA #$01
                                     0x00, 0xEA, # BRK + skipped byte
                                     0xEA, 0xEA, # NOP, NOP
                                     0xA9, 0x03)) # LDA #$03
    self._write(mpu.memory, 0x0400, (0xA9, 0x02, # LDA #$02
                                     0x40)) # RTI
    mpu.step()  # LDA #$01
    self.assertEqual(0x01, mpu.a)
    self.assertEqual(0x0002, mpu.pc)
    mpu.step()  # BRK -- control transfers to the handler at $0400
    self.assertEqual(0x0400, mpu.pc)
    mpu.step()  # LDA #$02
    self.assertEqual(0x02, mpu.a)
    self.assertEqual(0x0402, mpu.pc)
    mpu.step()  # RTI -- returns to $0004, past BRK and its padding byte
    self.assertEqual(0x0004, mpu.pc)
    mpu.step()  # A NOP
    mpu.step()  # The second NOP
    mpu.step()  # LDA #$03
    self.assertEqual(0x03, mpu.a)
    self.assertEqual(0x0008, mpu.pc)
# Test Helpers
def _write(self, memory, start_address, bytes):
    """Copy *bytes* into *memory* beginning at *start_address*."""
    end = start_address + len(bytes)
    memory[start_address:end] = bytes
def _make_mpu(self, *args, **kargs):
    """Build an instance of the MPU class under test.

    Unless the caller supplied a 'memory' kwarg, back the MPU with a
    full 64K list of 0xAA filler bytes.
    """
    klass = self._get_target_class()
    mpu = klass(*args, **kargs)
    if 'memory' not in kargs:
        # 0xAA filler makes accidental reads of uninitialized memory
        # easy to spot in failing assertions
        mpu.memory = 0x10000 * [0xAA]
    return mpu
def _get_target_class(self):
    """Hook for concrete subclasses to supply the MPU class under test."""
    raise NotImplementedError("Target class not specified")
class MPUTests(unittest.TestCase, Common6502Tests):
""" NMOS 6502 tests """
def test_repr(self):
mpu = self._make_mpu()
self.assertTrue("6502" in repr(mpu))
# ADC Indirect, Indexed (X)
def test_adc_ind_indexed_has_page_wrap_bug(self):
mpu = self._make_mpu()
mpu.p = 0x00
mpu.a = 0x01
mpu.x = 0xFF
# $0000 ADC ($80,X)
# $007f Vector to $BBBB (read if page wrapped)
# $017f Vector to $ABCD (read if no page wrap)
self._write(mpu.memory, 0x0000, (0x61, 0x80))
self._write(mpu.memory, 0x007f, (0xBB, 0xBB))
self._write(mpu.memory, 0x017f, (0xCD, 0xAB))
mpu.memory[0xABCD] = 0x01
mpu.memory[0xBBBB] = 0x02
mpu.step()
self.assertEqual(0x03, mpu.a)
# ADC Indexed, Indirect (Y)
def test_adc_indexed_ind_y_has_page_wrap_bug(self):
mpu = self._make_mpu()
mpu.pc = 0x1000
mpu.p = 0
mpu.a = 0x42
mpu.y = 0x02
# $1000 ADC ($FF),Y
self._write(mpu.memory, 0x1000, (0x71, 0xff))
# Vector
mpu.memory[0x00ff] = 0x10 # low byte
mpu.memory[0x0100] = 0x20 # high byte if no page wrap
mpu.memory[0x0000] = 0x00 # high byte if page wrapped
# Data
mpu.memory[0x2012] = 0x14 # read if no page wrap
mpu.memory[0x0012] = 0x42 # read if page wrapped
mpu.step()
self.assertEqual(0x84, mpu.a)
# LDA Zero Page, X-Indexed
def test_lda_zp_x_indexed_page_wraps(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.x = 0xFF
# $0000 LDA $80,X
self._write(mpu.memory, 0x0000, (0xB5, 0x80))
mpu.memory[0x007F] = 0x42
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x42, mpu.a)
# AND Indexed, Indirect (Y)
def test_and_indexed_ind_y_has_page_wrap_bug(self):
mpu = self._make_mpu()
mpu.pc = 0x1000
mpu.a = 0x42
mpu.y = 0x02
# $1000 AND ($FF),Y
self._write(mpu.memory, 0x1000, (0x31, 0xff))
# Vector
mpu.memory[0x00ff] = 0x10 # low byte
mpu.memory[0x0100] = 0x20 # high byte if no page wrap
mpu.memory[0x0000] = 0x00 # high byte if page wrapped
# Data
mpu.memory[0x2012] = 0x00 # read if no page wrap
mpu.memory[0x0012] = 0xFF # read if page wrapped
mpu.step()
self.assertEqual(0x42, mpu.a)
# BRK
def test_brk_preserves_decimal_flag_when_it_is_set(self):
mpu = self._make_mpu()
mpu.p = mpu.DECIMAL
# $C000 BRK
mpu.memory[0xC000] = 0x00
mpu.pc = 0xC000
mpu.step()
self.assertEqual(mpu.BREAK, mpu.p & mpu.BREAK)
self.assertEqual(mpu.DECIMAL, mpu.p & mpu.DECIMAL)
def test_brk_preserves_decimal_flag_when_it_is_clear(self):
mpu = self._make_mpu()
mpu.p = 0
# $C000 BRK
mpu.memory[0xC000] = 0x00
mpu.pc = 0xC000
mpu.step()
self.assertEqual(mpu.BREAK, mpu.p & mpu.BREAK)
self.assertEqual(0, mpu.p & mpu.DECIMAL)
# CMP Indirect, Indexed (X)
def test_cmp_ind_x_has_page_wrap_bug(self):
mpu = self._make_mpu()
mpu.p = 0
mpu.a = 0x42
mpu.x = 0xFF
# $0000 CMP ($80,X)
# $007f Vector to $BBBB (read if page wrapped)
# $017f Vector to $ABCD (read if no page wrap)
self._write(mpu.memory, 0x0000, (0xC1, 0x80))
self._write(mpu.memory, 0x007f, (0xBB, 0xBB))
self._write(mpu.memory, 0x017f, (0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.memory[0xBBBB] = 0x42
mpu.step()
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
# CMP Indexed, Indirect (Y)
def test_cmp_indexed_ind_y_has_page_wrap_bug(self):
mpu = self._make_mpu()
mpu.pc = 0x1000
mpu.p = 0
mpu.a = 0x42
mpu.y = 0x02
# $1000 CMP ($FF),Y
self._write(mpu.memory, 0x1000, (0xd1, 0xff))
# Vector
mpu.memory[0x00ff] = 0x10 # low byte
mpu.memory[0x0100] = 0x20 # high byte if no page wrap
mpu.memory[0x0000] = 0x00 # high byte if page wrapped
# Data
mpu.memory[0x2012] = 0x14 # read if no page wrap
mpu.memory[0x0012] = 0x42 # read if page wrapped
mpu.step()
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
# EOR Indirect, Indexed (X)
def test_eor_ind_x_has_page_wrap_bug(self):
mpu = self._make_mpu()
mpu.p = 0
mpu.a = 0xAA
mpu.x = 0xFF
# $0000 EOR ($80,X)
# $007f Vector to $BBBB (read if page wrapped)
# $017f Vector to $ABCD (read if no page wrap)
self._write(mpu.memory, 0x0000, (0x41, 0x80))
self._write(mpu.memory, 0x007f, (0xBB, 0xBB))
self._write(mpu.memory, 0x017f, (0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.memory[0xBBBB] = 0xFF
mpu.step()
self.assertEqual(0x55, mpu.a)
# EOR Indexed, Indirect (Y)
def test_eor_indexed_ind_y_has_page_wrap_bug(self):
mpu = self._make_mpu()
mpu.pc = 0x1000
mpu.a = 0xAA
mpu.y = 0x02
# $1000 EOR ($FF),Y
self._write(mpu.memory, 0x1000, (0x51, 0xff))
# Vector
mpu.memory[0x00ff] = 0x10 # low byte
mpu.memory[0x0100] = 0x20 # high byte if no page wrap
mpu.memory[0x0000] = 0x00 # high byte if page wrapped
# Data
mpu.memory[0x2012] = 0x00 # read if no page wrap
mpu.memory[0x0012] = 0xFF # read if page wrapped
mpu.step()
self.assertEqual(0x55, mpu.a)
# LDA Indirect, Indexed (X)
def test_lda_ind_indexed_x_has_page_wrap_bug(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.x = 0xff
# $0000 LDA ($80,X)
# $007f Vector to $BBBB (read if page wrapped)
# $017f Vector to $ABCD (read if no page wrap)
self._write(mpu.memory, 0x0000, (0xA1, 0x80))
self._write(mpu.memory, 0x007f, (0xBB, 0xBB))
self._write(mpu.memory, 0x017f, (0xCD, 0xAB))
mpu.memory[0xABCD] = 0x42
mpu.memory[0xBBBB] = 0xEF
mpu.step()
self.assertEqual(0xEF, mpu.a)
# LDA Indexed, Indirect (Y)
def test_lda_indexed_ind_y_has_page_wrap_bug(self):
mpu = self._make_mpu()
mpu.pc = 0x1000
mpu.a = 0x00
mpu.y = 0x02
# $1000 LDA ($FF),Y
self._write(mpu.memory, 0x1000, (0xb1, 0xff))
# Vector
mpu.memory[0x00ff] = 0x10 # low byte
mpu.memory[0x0100] = 0x20 # high byte if no page wrap
mpu.memory[0x0000] = 0x00 # high byte if page wrapped
# Data
mpu.memory[0x2012] = 0x14 # read if no page wrap
mpu.memory[0x0012] = 0x42 # read if page wrapped
mpu.step()
self.assertEqual(0x42, mpu.a)
# LDA Zero Page, X-Indexed
def test_lda_zp_x_has_page_wrap_bug(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.x = 0xFF
# $0000 LDA $80,X
self._write(mpu.memory, 0x0000, (0xB5, 0x80))
mpu.memory[0x007F] = 0x42
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x42, mpu.a)
# JMP Indirect
def test_jmp_jumps_to_address_with_page_wrap_bug(self):
mpu = self._make_mpu()
mpu.memory[0x00ff] = 0
# $0000 JMP ($00)
self._write(mpu.memory, 0, (0x6c, 0xff, 0x00))
mpu.step()
self.assertEqual(0x6c00, mpu.pc)
self.assertEqual(5, mpu.processorCycles)
# ORA Indexed, Indirect (Y)
def test_ora_indexed_ind_y_has_page_wrap_bug(self):
mpu = self._make_mpu()
mpu.pc = 0x1000
mpu.a = 0x00
mpu.y = 0x02
# $1000 ORA ($FF),Y
self._write(mpu.memory, 0x1000, (0x11, 0xff))
# Vector
mpu.memory[0x00ff] = 0x10 # low byte
mpu.memory[0x0100] = 0x20 # high byte if no page wrap
mpu.memory[0x0000] = 0x00 # high byte if page wrapped
# Data
mpu.memory[0x2012] = 0x00 # read if no page wrap
mpu.memory[0x0012] = 0x42 # read if page wrapped
mpu.step()
self.assertEqual(0x42, mpu.a)
# SBC Indexed, Indirect (Y)
def test_sbc_indexed_ind_y_has_page_wrap_bug(self):
mpu = self._make_mpu()
mpu.pc = 0x1000
mpu.p = mpu.CARRY
mpu.a = 0x42
mpu.y = 0x02
# $1000 SBC ($FF),Y
self._write(mpu.memory, 0x1000, (0xf1, 0xff))
# Vector
mpu.memory[0x00ff] = 0x10 # low byte
mpu.memory[0x0100] = 0x20 # high byte if no page wrap
mpu.memory[0x0000] = 0x00 # high byte if page wrapped
# Data
mpu.memory[0x2012] = 0x02 # read if no page wrap
mpu.memory[0x0012] = 0x03 # read if page wrapped
mpu.step()
self.assertEqual(0x3f, mpu.a)
def _get_target_class(self):
return py65.devices.mpu6502.MPU
def test_suite():
    """Collect all TestCase classes in this module for unittest discovery."""
    return unittest.findTestCases(sys.modules[__name__])
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| 34.613934
| 79
| 0.593752
|
794c85ba1bb44b487c650fa2a187596f9eacf4c5
| 273
|
py
|
Python
|
Estudo Python/Manipulando Textos/ex026.py
|
ErickFernan/Estudos-de-Pyhton
|
ce6818b7ba1fcede0d0e6708488a6f55f5ae6351
|
[
"MIT"
] | null | null | null |
Estudo Python/Manipulando Textos/ex026.py
|
ErickFernan/Estudos-de-Pyhton
|
ce6818b7ba1fcede0d0e6708488a6f55f5ae6351
|
[
"MIT"
] | null | null | null |
Estudo Python/Manipulando Textos/ex026.py
|
ErickFernan/Estudos-de-Pyhton
|
ce6818b7ba1fcede0d0e6708488a6f55f5ae6351
|
[
"MIT"
] | null | null | null |
# Read a phrase and report statistics about the letter 'a':
# total occurrences, first position, and last position (1-based).
frase = input('Digite uma frase: ').lower().strip(' ')
total_a = frase.count('a')
primeira_pos = frase.find('a') + 1
ultima_pos = frase.rfind('a') + 1
print('A letra "A" aparece {} vezes.'.format(total_a))
print('A letra "A" aparece a primeira vez em {}.'.format(primeira_pos))
print('A última letra a aparece por ultimo em {}'.format(ultima_pos))
| 68.25
| 77
| 0.655678
|
794c85d15a78376016c464a4ea85ac7b6f97d4e8
| 7,551
|
py
|
Python
|
src/utilities/utils_images.py
|
robertsmoto/sodavault
|
200e843be7abe6cc447647bba55c7c1309092e5e
|
[
"BSD-3-Clause"
] | null | null | null |
src/utilities/utils_images.py
|
robertsmoto/sodavault
|
200e843be7abe6cc447647bba55c7c1309092e5e
|
[
"BSD-3-Clause"
] | null | null | null |
src/utilities/utils_images.py
|
robertsmoto/sodavault
|
200e843be7abe6cc447647bba55c7c1309092e5e
|
[
"BSD-3-Clause"
] | null | null | null |
from decouple import config
from django.utils import timezone
from imagekit import ImageSpec
from imagekit.processors import ResizeToFill
from sodavault.utils_logging import svlog_info
import boto3
import botocore
import os
def create_dir_size_var(fn: str, size: tuple) -> tuple:
    """Build a YYYY/MM/DD/ date directory and a size-suffixed .webp filename.

    Args:
        fn: base filename without extension.
        size: (width, height) pair appended to the name as "-WxH".

    Returns:
        (date_dir, filename).  BUG FIX: the original return annotation
        said ``str`` although the function returns a 2-tuple.  No
        directory is created here (the old "# Create dirs" comment was
        misleading); the path components are only computed.
    """
    now = timezone.now()
    date_dir = now.strftime("%Y/%m/%d/")
    fn = f'{fn}-{size[0]}x{size[1]}.webp'
    return date_dir, fn
def modify_fn_and_path(filename: str) -> tuple:
    """Return (date_dir, webp_filename) derived from *filename*.

    The date directory is YYYY/MM/DD/.  The basename is stripped of its
    extension and of all non-alphanumeric characters, then given a .webp
    extension.  BUG FIX: the original return annotation said ``str``
    although the function returns a 2-tuple.
    """
    now = timezone.now()
    date_dir = now.strftime('%Y/%m/%d/')
    # build the sanitized filename
    base_fn = os.path.basename(filename)
    fn = os.path.splitext(base_fn)[0]
    fn = "".join(x for x in fn if x.isalnum())
    fn = f"{fn}.webp"
    return date_dir, fn
def new_filename(instance, filename):
    # Upload-path callback for a Django FileField: advertising banners.
    # NOTE(review): duplicates new_filename_banner below -- consider
    # consolidating on one of the two.
    date_dir, fn = modify_fn_and_path(filename=filename)
    return os.path.join('advertisingapp/banners/', date_dir, fn)


def new_filename_blog_feat(instance, filename):
    # Upload-path callback: blog featured images.
    date_dir, fn = modify_fn_and_path(filename=filename)
    return os.path.join('blogapp/featured/', date_dir, fn)


def new_filename_blog_thumb(instance, filename):
    # Upload-path callback: blog thumbnails.
    date_dir, fn = modify_fn_and_path(filename=filename)
    return os.path.join('blogapp/thumbnail/', date_dir, fn)


def new_filename_blog_cat(instance, filename):
    # Upload-path callback: blog category images.
    date_dir, fn = modify_fn_and_path(filename=filename)
    return os.path.join('blogapp/category/', date_dir, fn)


def new_filename_banner(instance, filename):
    # Upload-path callback: advertising banners (see new_filename above).
    date_dir, fn = modify_fn_and_path(filename=filename)
    return os.path.join('advertisingapp/banners/', date_dir, fn)


def new_filename_assett(instance, filename):
    # Upload-path callback: advertising assetts.
    date_dir, fn = modify_fn_and_path(filename=filename)
    return os.path.join('advertisingapp/assetts/', date_dir, fn)
def check_and_remove_file(file_path: str) -> None:
    """Remove *file_path* if it exists; otherwise log that it is missing."""
    if not os.path.exists(file_path):
        svlog_info(
            "The file does not exist.",
            field=file_path)
        return
    os.remove(file_path)
def write_image_to_local(django_read: object, fn: str, loc_dir: str) -> str:
    """Write image bytes *django_read* to ``loc_dir/fn`` and return the path.

    Creates *loc_dir* if it does not exist (``open`` only creates the
    file, not intermediate directories) and removes any stale file of
    the same name before writing.
    """
    # exist_ok avoids the check-then-create race of the original
    # `if not os.path.exists(...): os.makedirs(...)` pattern
    os.makedirs(loc_dir, exist_ok=True)
    file_path = os.path.join(loc_dir, fn)
    check_and_remove_file(file_path=file_path)
    # BUG FIX: the original opened the file and never closed it, leaking
    # the handle; the context manager guarantees it is flushed and closed.
    with open(file_path, 'wb') as dest:
        dest.write(django_read)
    return file_path
# ---------------------------------------------------------------------------
# imagekit ImageSpec variants.  Each class fixes one output geometry via
# ResizeToFill and always encodes as WEBP at quality 80.  Class names encode
# the target use (blog featured image / ad banner) and the banner size.
# ---------------------------------------------------------------------------
class FeaturedLgWebp(ImageSpec):
    # 2:1 featured image, large
    processors = [ResizeToFill(1600, 800)]
    format = 'WEBP'
    options = {'quality': 80}


class FeaturedMdWebp(ImageSpec):
    # 2:1 featured image, medium
    processors = [ResizeToFill(800, 400)]
    format = 'WEBP'
    options = {'quality': 80}


class FeaturedSmWebp(ImageSpec):
    # 2:1 featured image, small
    processors = [ResizeToFill(400, 200)]
    format = 'WEBP'
    options = {'quality': 80}


class BannerLgSqWebp(ImageSpec):
    # square banner, large
    processors = [ResizeToFill(500, 500)]
    format = 'WEBP'
    options = {'quality': 80}


class BannerMdSqWebp(ImageSpec):
    # square banner, medium
    processors = [ResizeToFill(250, 250)]
    format = 'WEBP'
    options = {'quality': 80}


class BannerSmSqWebp(ImageSpec):
    # square banner, small
    processors = [ResizeToFill(200, 200)]
    format = 'WEBP'
    options = {'quality': 80}


class BannerLeaderboardWebp(ImageSpec):
    # leaderboard banner
    processors = [ResizeToFill(728, 90)]
    format = 'WEBP'
    options = {'quality': 80}


class BannerLgLeaderboardWebp(ImageSpec):
    # large leaderboard banner
    processors = [ResizeToFill(970, 90)]
    format = 'WEBP'
    options = {'quality': 80}


class BannerInlineRectangleWebp(ImageSpec):
    # inline rectangle banner
    processors = [ResizeToFill(300, 250)]
    format = 'WEBP'
    options = {'quality': 80}


class BannerLgRectangleWebp(ImageSpec):
    # large rectangle banner
    processors = [ResizeToFill(336, 280)]
    format = 'WEBP'
    options = {'quality': 80}


class BannerSkyScraperWebp(ImageSpec):
    # skyscraper banner
    processors = [ResizeToFill(160, 600)]
    format = 'WEBP'
    options = {'quality': 80}
def process_images(k: str, v) -> str:
    """Generate one resized WEBP variant of an image and store it.

    Args:
        k: name of the image field (used only for logging).
        v: 4-tuple of (ImageSpec processor class, source image field,
           (width, height) size tuple, storage subdir).

    Returns:
        The stored file path.  (Annotation fixed: the function returns a
        path string, not None.)

    In production (ENV_USE_SPACES) the variant is uploaded to the
    S3-compatible CDN via a local temp file; in development it is written
    directly under MEDIA_ROOT.
    """
    processor = v[0]
    source = v[1]
    size = v[2]
    subdir = v[3]
    # derive a sanitized (alphanumeric-only) base name from the source url
    base_fn = os.path.basename(source.url)
    fn = os.path.splitext(base_fn)[0]
    fn = ''.join(x for x in fn if x.isalnum())
    # Generate the resized image and read its bytes via the django api
    ban = processor(source=source).generate()
    banner_read = ban.read()
    date_dir, fn = create_dir_size_var(
        fn=fn,
        size=size)
    if config('ENV_USE_SPACES', cast=bool):
        file_path = os.path.join(subdir, date_dir, fn)
        s3_upload_path = os.path.join('media', file_path)
        # the image must be saved to a temp dir before uploading to s3
        temp_dir = config('ENV_TEMP_DIR')
        local_filepath = write_image_to_local(
            django_read=banner_read, fn=fn, loc_dir=temp_dir)
        # now upload the local file to the CDN
        session = boto3.session.Session()
        s3client = session.client(
            's3',
            region_name=config('ENV_AWS_S3_REGION_NAME'),
            endpoint_url=config('ENV_AWS_S3_ENDPOINT_URL'),
            aws_access_key_id=config('ENV_AWS_ACCESS_KEY_ID'),
            aws_secret_access_key=config(
                'ENV_AWS_SECRET_ACCESS_KEY'))
        # check whether an object with the same key already exists and,
        # if so, delete it before uploading the replacement
        s3resource = boto3.resource(
            's3',
            region_name=config('ENV_AWS_S3_REGION_NAME'),
            endpoint_url=config('ENV_AWS_S3_ENDPOINT_URL'),
            aws_access_key_id=config('ENV_AWS_ACCESS_KEY_ID'),
            aws_secret_access_key=config(
                'ENV_AWS_SECRET_ACCESS_KEY'))
        try:
            s3resource.Object(
                config('ENV_AWS_STORAGE_BUCKET_NAME'),
                s3_upload_path).load()
        except botocore.exceptions.ClientError as e:
            if e.response['Error']['Code'] == "404":
                # The object does not exist.
                svlog_info("The s3 object does not exist.")
            else:
                # Something else has gone wrong.
                svlog_info(f"Something went wrong with s3: {e}")
        else:
            # The object does exist.
            svlog_info(
                "s3 object exists, deleted it before "
                "uploading.")
            # BUG FIX: delete the same key that was checked and that is
            # uploaded below (s3_upload_path).  The original deleted
            # `file_path`, which lacks the 'media/' prefix, so the stale
            # object was never actually removed.
            s3resource.Object(
                config('ENV_AWS_STORAGE_BUCKET_NAME'),
                s3_upload_path).delete()
        try:
            with open(local_filepath, 'rb') as file_contents:
                s3client.put_object(
                    Bucket=config('ENV_AWS_STORAGE_BUCKET_NAME'),
                    Key=s3_upload_path,
                    Body=file_contents,
                    # NOTE(review): 'webp' is not a valid Content-Encoding
                    # value (that header is for gzip/br/...); kept for
                    # compatibility but probably belongs in ContentType only.
                    ContentEncoding='webp',
                    ContentType='image/webp',
                    CacheControl='max-age=86400',
                    ACL='public-read')
        except Exception as e:
            svlog_info("S3 open exception", field=e)
        # the temp file is no longer needed once uploaded
        check_and_remove_file(file_path=local_filepath)
    else:
        # development server: write the file directly to its final
        # location under MEDIA_ROOT
        media_root = config('ENV_MEDIA_ROOT')
        base_dir = os.path.join(
            media_root, subdir, date_dir)
        file_path = write_image_to_local(
            django_read=banner_read, fn=fn, loc_dir=base_dir)
    svlog_info(f"Assign file_path {k}.", field=file_path)
    return file_path
| 29.964286
| 76
| 0.629321
|
794c864fb5398ef11f8680a12b740344041063af
| 19,931
|
py
|
Python
|
tools/run-tests.py
|
ErcOneMOD/external_chromium_org_v8
|
d4b68d974eaebae6c56acd20fda20c52112f6511
|
[
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | null | null | null |
tools/run-tests.py
|
ErcOneMOD/external_chromium_org_v8
|
d4b68d974eaebae6c56acd20fda20c52112f6511
|
[
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | null | null | null |
tools/run-tests.py
|
ErcOneMOD/external_chromium_org_v8
|
d4b68d974eaebae6c56acd20fda20c52112f6511
|
[
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import itertools
import multiprocessing
import optparse
import os
from os.path import join
import platform
import random
import shlex
import subprocess
import sys
import time
from testrunner.local import execution
from testrunner.local import progress
from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.local import verbose
from testrunner.network import network_execution
from testrunner.objects import context
# Architecture of the host machine; used for --arch=auto/native and to
# decide whether a target architecture runs under a simulator.
ARCH_GUESS = utils.DefaultArch()
# Suites run when no test names are given on the command line ("intl" is
# appended later unless --no-i18n is passed; see ProcessOptions).
DEFAULT_TESTS = ["mjsunit", "fuzz-natives", "cctest", "message", "preparser"]
# Base per-test timeout in seconds; doubled for SLOW_ARCHS and scaled by
# TIMEOUT_SCALEFACTOR for the build mode.
TIMEOUT_DEFAULT = 60
TIMEOUT_SCALEFACTOR = {"debug" : 4,
                       "release" : 1 }
# Use this to run several variants of the tests.
VARIANT_FLAGS = {
    "default": [],
    "stress": ["--stress-opt", "--always-opt"],
    "nocrankshaft": ["--nocrankshaft"]}
# Variant names actually run; narrowed in ProcessOptions by --no-stress,
# --no-variants, --stress-only, --variants or --quickcheck.
VARIANTS = ["default", "stress", "nocrankshaft"]
# Extra shell flags always passed for the given build mode.
MODE_FLAGS = {
    "debug"   : ["--nohard-abort", "--nodead-code-elimination",
                 "--nofold-constants", "--enable-slow-asserts",
                 "--debug-code", "--verify-heap"],
    "release" : ["--nohard-abort", "--nodead-code-elimination",
                 "--nofold-constants"]}
# Flags appended to extra_flags when --gc-stress is given.
GC_STRESS_FLAGS = ["--gc-interval=500", "--stress-compaction",
                   "--concurrent-recompilation-queue-length=64",
                   "--concurrent-recompilation-delay=500",
                   "--concurrent-recompilation"]
SUPPORTED_ARCHS = ["android_arm",
                   "android_arm64",
                   "android_ia32",
                   "arm",
                   "ia32",
                   "x87",
                   "mips",
                   "mipsel",
                   "nacl_ia32",
                   "nacl_x64",
                   "x64",
                   "arm64"]
# Double the timeout for these:
SLOW_ARCHS = ["android_arm",
              "android_arm64",
              "android_ia32",
              "arm",
              "mips",
              "mipsel",
              "nacl_ia32",
              "nacl_x64",
              "x87",
              "arm64"]
def BuildOptions():
  """Construct the optparse parser describing all command-line options.

  Only builds the parser; validation and normalization of the parsed
  values happens in ProcessOptions().
  """
  result = optparse.OptionParser()
  result.add_option("--arch",
                    help=("The architecture to run tests for, "
                          "'auto' or 'native' for auto-detect"),
                    default="ia32,x64,arm")
  result.add_option("--arch-and-mode",
                    help="Architecture and mode in the format 'arch.mode'",
                    default=None)
  result.add_option("--asan",
                    help="Regard test expectations for ASAN",
                    default=False, action="store_true")
  result.add_option("--buildbot",
                    help="Adapt to path structure used on buildbots",
                    default=False, action="store_true")
  result.add_option("--cat", help="Print the source of the tests",
                    default=False, action="store_true")
  result.add_option("--flaky-tests",
                    help="Regard tests marked as flaky (run|skip|dontcare)",
                    default="dontcare")
  result.add_option("--slow-tests",
                    help="Regard slow tests (run|skip|dontcare)",
                    default="dontcare")
  result.add_option("--pass-fail-tests",
                    help="Regard pass|fail tests (run|skip|dontcare)",
                    default="dontcare")
  result.add_option("--gc-stress",
                    help="Switch on GC stress mode",
                    default=False, action="store_true")
  result.add_option("--command-prefix",
                    help="Prepended to each shell command used to run a test",
                    default="")
  result.add_option("--download-data", help="Download missing test suite data",
                    default=False, action="store_true")
  result.add_option("--extra-flags",
                    help="Additional flags to pass to each test command",
                    default="")
  result.add_option("--isolates", help="Whether to test isolates",
                    default=False, action="store_true")
  result.add_option("-j", help="The number of parallel tasks to run",
                    default=0, type="int")
  result.add_option("-m", "--mode",
                    help="The test modes in which to run (comma-separated)",
                    default="release,debug")
  result.add_option("--no-i18n", "--noi18n",
                    help="Skip internationalization tests",
                    default=False, action="store_true")
  result.add_option("--no-network", "--nonetwork",
                    help="Don't distribute tests on the network",
                    default=(utils.GuessOS() != "linux"),
                    dest="no_network", action="store_true")
  result.add_option("--no-presubmit", "--nopresubmit",
                    help='Skip presubmit checks',
                    default=False, dest="no_presubmit", action="store_true")
  result.add_option("--no-snap", "--nosnap",
                    help='Test a build compiled without snapshot.',
                    default=False, dest="no_snap", action="store_true")
  result.add_option("--no-sorting", "--nosorting",
                    help="Don't sort tests according to duration of last run.",
                    default=False, dest="no_sorting", action="store_true")
  result.add_option("--no-stress", "--nostress",
                    help="Don't run crankshaft --always-opt --stress-op test",
                    default=False, dest="no_stress", action="store_true")
  result.add_option("--no-variants", "--novariants",
                    help="Don't run any testing variants",
                    default=False, dest="no_variants", action="store_true")
  result.add_option("--variants",
                    help="Comma-separated list of testing variants")
  result.add_option("--outdir", help="Base directory with compile output",
                    default="out")
  result.add_option("-p", "--progress",
                    help=("The style of progress indicator"
                          " (verbose, dots, color, mono)"),
                    choices=progress.PROGRESS_INDICATORS.keys(), default="mono")
  result.add_option("--quickcheck", default=False, action="store_true",
                    help=("Quick check mode (skip slow/flaky tests)"))
  result.add_option("--report", help="Print a summary of the tests to be run",
                    default=False, action="store_true")
  result.add_option("--json-test-results",
                    help="Path to a file for storing json results.")
  result.add_option("--shard-count",
                    help="Split testsuites into this number of shards",
                    default=1, type="int")
  result.add_option("--shard-run",
                    help="Run this shard from the split up tests.",
                    default=1, type="int")
  result.add_option("--shell", help="DEPRECATED! use --shell-dir", default="")
  result.add_option("--shell-dir", help="Directory containing executables",
                    default="")
  result.add_option("--dont-skip-slow-simulator-tests",
                    help="Don't skip more slow tests when using a simulator.",
                    default=False, action="store_true",
                    dest="dont_skip_simulator_slow_tests")
  result.add_option("--stress-only",
                    help="Only run tests with --always-opt --stress-opt",
                    default=False, action="store_true")
  result.add_option("--time", help="Print timing information after running",
                    default=False, action="store_true")
  result.add_option("-t", "--timeout", help="Timeout in seconds",
                    default= -1, type="int")
  result.add_option("-v", "--verbose", help="Verbose output",
                    default=False, action="store_true")
  result.add_option("--valgrind", help="Run tests through valgrind",
                    default=False, action="store_true")
  result.add_option("--warn-unused", help="Report unused rules",
                    default=False, action="store_true")
  result.add_option("--junitout", help="File name of the JUnit output")
  result.add_option("--junittestsuite",
                    help="The testsuite name in the JUnit output file",
                    default="v8tests")
  result.add_option("--random-seed", default=0, dest="random_seed",
                    help="Default seed for initializing random generator")
  return result
def ProcessOptions(options):
  """Validate and normalize the parsed options in place.

  Returns True when the options are usable, False to trigger the usage
  message. Mutates the module-level VARIANTS / DEFAULT_TESTS as a side
  effect. (This file is Python 2: note the print statements and the
  builtin reduce below.)
  """
  global VARIANT_FLAGS
  global VARIANTS
  # Architecture and mode related stuff.
  if options.arch_and_mode:
    options.arch_and_mode = [arch_and_mode.split(".")
        for arch_and_mode in options.arch_and_mode.split(",")]
    options.arch = ",".join([tokens[0] for tokens in options.arch_and_mode])
    options.mode = ",".join([tokens[1] for tokens in options.arch_and_mode])
  options.mode = options.mode.split(",")
  for mode in options.mode:
    if not mode.lower() in ["debug", "release", "optdebug"]:
      print "Unknown mode %s" % mode
      return False
  if options.arch in ["auto", "native"]:
    options.arch = ARCH_GUESS
  options.arch = options.arch.split(",")
  for arch in options.arch:
    if not arch in SUPPORTED_ARCHS:
      print "Unknown architecture %s" % arch
      return False
  # Store the final configuration in arch_and_mode list. Don't overwrite
  # predefined arch_and_mode since it is more expressive than arch and mode.
  if not options.arch_and_mode:
    options.arch_and_mode = itertools.product(options.arch, options.mode)
  # Special processing of other options, sorted alphabetically.
  if options.buildbot:
    # Buildbots run presubmit tests as a separate step.
    options.no_presubmit = True
    options.no_network = True
  if options.command_prefix:
    print("Specifying --command-prefix disables network distribution, "
          "running tests locally.")
    options.no_network = True
  options.command_prefix = shlex.split(options.command_prefix)
  options.extra_flags = shlex.split(options.extra_flags)
  if options.gc_stress:
    options.extra_flags += GC_STRESS_FLAGS
  if options.asan:
    options.extra_flags.append("--invoke-weak-callbacks")
  if options.j == 0:
    # 0 means "one worker per CPU".
    options.j = multiprocessing.cpu_count()
  # 0 is a reserved sentinel; loop until a real non-zero seed is drawn.
  while options.random_seed == 0:
    options.random_seed = random.SystemRandom().randint(-2147483648, 2147483647)
  def excl(*args):
    """Returns true if zero or one of multiple arguments are true."""
    return reduce(lambda x, y: x + y, args) <= 1
  if not excl(options.no_stress, options.stress_only, options.no_variants,
              bool(options.variants), options.quickcheck):
    print("Use only one of --no-stress, --stress-only, --no-variants, "
          "--variants, or --quickcheck.")
    return False
  if options.no_stress:
    VARIANTS = ["default", "nocrankshaft"]
  if options.no_variants:
    VARIANTS = ["default"]
  if options.stress_only:
    VARIANTS = ["stress"]
  if options.variants:
    VARIANTS = options.variants.split(",")
    if not set(VARIANTS).issubset(VARIANT_FLAGS.keys()):
      print "All variants must be in %s" % str(VARIANT_FLAGS.keys())
      return False
  if options.quickcheck:
    VARIANTS = ["default", "stress"]
    options.flaky_tests = "skip"
    options.slow_tests = "skip"
    options.pass_fail_tests = "skip"
  if not options.shell_dir:
    if options.shell:
      print "Warning: --shell is deprecated, use --shell-dir instead."
      options.shell_dir = os.path.dirname(options.shell)
  if options.valgrind:
    run_valgrind = os.path.join("tools", "run-valgrind.py")
    # This is OK for distributed running, so we don't need to set no_network.
    options.command_prefix = (["python", "-u", run_valgrind] +
                              options.command_prefix)
  def CheckTestMode(name, option):
    # Validate one of the run|skip|dontcare test-mode option values.
    if not option in ["run", "skip", "dontcare"]:
      print "Unknown %s mode %s" % (name, option)
      return False
    return True
  if not CheckTestMode("flaky test", options.flaky_tests):
    return False
  if not CheckTestMode("slow test", options.slow_tests):
    return False
  if not CheckTestMode("pass|fail test", options.pass_fail_tests):
    return False
  if not options.no_i18n:
    DEFAULT_TESTS.append("intl")
  return True
def ShardTests(tests, shard_count, shard_run):
  """Return the subset of |tests| that belongs to shard |shard_run|.

  Tests are distributed round-robin over |shard_count| shards, with
  1-based shard numbers. With fewer than two shards, or an out-of-range
  |shard_run|, all tests are returned unchanged.
  """
  if shard_count < 2:
    return tests
  if shard_run < 1 or shard_run > shard_count:
    # Parenthesized print form for consistency with the rest of this file
    # (works identically under Python 2 for a single string argument).
    print("shard-run not a valid number, should be in [1:shard-count]")
    print("defaulting back to running all tests")
    return tests
  # Round-robin assignment: test i belongs to shard (i % shard_count) + 1.
  return [test for i, test in enumerate(tests)
          if i % shard_count == shard_run - 1]
def Main():
  """Entry point: parse options, load suites and run all configurations.

  Returns the process exit code: 0 on success, 1 on bad options, 2 on
  keyboard interrupt, otherwise the first non-zero suite/presubmit code.
  """
  parser = BuildOptions()
  (options, args) = parser.parse_args()
  if not ProcessOptions(options):
    parser.print_help()
    return 1
  exit_code = 0
  # Repository root: one directory above this script's location.
  workspace = os.path.abspath(join(os.path.dirname(sys.argv[0]), ".."))
  if not options.no_presubmit:
    print ">>> running presubmit tests"
    exit_code = subprocess.call(
        [sys.executable, join(workspace, "tools", "presubmit.py")])
  suite_paths = utils.GetSuitePaths(join(workspace, "test"))
  # Positional args select suites by their first path component; with no
  # args, fall back to the default suite list.
  if len(args) == 0:
    suite_paths = [ s for s in DEFAULT_TESTS if s in suite_paths ]
  else:
    args_suites = set()
    for arg in args:
      suite = arg.split(os.path.sep)[0]
      if not suite in args_suites:
        args_suites.add(suite)
    suite_paths = [ s for s in args_suites if s in suite_paths ]
  suites = []
  for root in suite_paths:
    suite = testsuite.TestSuite.LoadTestSuite(
        os.path.join(workspace, "test", root))
    if suite:
      suites.append(suite)
  if options.download_data:
    for s in suites:
      s.DownloadData()
  # Run every requested (arch, mode) combination, keeping the first
  # non-zero exit code seen.
  for (arch, mode) in options.arch_and_mode:
    try:
      code = Execute(arch, mode, args, options, suites, workspace)
    except KeyboardInterrupt:
      return 2
    exit_code = exit_code or code
  return exit_code
def Execute(arch, mode, args, options, suites, workspace):
  """Run every loaded suite for one (arch, mode) pair.

  Returns the runner's exit code (0 on success). Runs either locally or,
  when networking conditions allow, distributed over the network.
  """
  print(">>> Running tests for %s.%s" % (arch, mode))
  shell_dir = options.shell_dir
  if not shell_dir:
    if options.buildbot:
      # Buildbot output layout: <workspace>/<outdir>/<Mode>.
      shell_dir = os.path.join(workspace, options.outdir, mode)
      mode = mode.lower()
    else:
      shell_dir = os.path.join(workspace, options.outdir,
                               "%s.%s" % (arch, mode))
  shell_dir = os.path.relpath(shell_dir)
  if mode == "optdebug":
    mode = "debug" # "optdebug" is just an alias.
  # Populate context object.
  mode_flags = MODE_FLAGS[mode]
  timeout = options.timeout
  if timeout == -1:
    # Simulators are slow, therefore allow a longer default timeout.
    if arch in SLOW_ARCHS:
      timeout = 2 * TIMEOUT_DEFAULT;
    else:
      timeout = TIMEOUT_DEFAULT;
  timeout *= TIMEOUT_SCALEFACTOR[mode]
  ctx = context.Context(arch, mode, shell_dir,
                        mode_flags, options.verbose,
                        timeout, options.isolates,
                        options.command_prefix,
                        options.extra_flags,
                        options.no_i18n,
                        options.random_seed,
                        options.no_sorting)
  # TODO(all): Combine "simulator" and "simulator_run".
  # True when targeting a simulated arch from a different host arch,
  # unless the user opted out of skipping slow simulator tests.
  simulator_run = not options.dont_skip_simulator_slow_tests and \
      arch in ['arm64', 'arm', 'mips'] and ARCH_GUESS and arch != ARCH_GUESS
  # Find available test suites and read test cases from them.
  # These variables feed each suite's .status-file expression evaluation.
  variables = {
    "arch": arch,
    "asan": options.asan,
    "deopt_fuzzer": False,
    "gc_stress": options.gc_stress,
    "isolates": options.isolates,
    "mode": mode,
    "no_i18n": options.no_i18n,
    "no_snap": options.no_snap,
    "simulator_run": simulator_run,
    "simulator": utils.UseSimulator(arch),
    "system": utils.GuessOS(),
  }
  all_tests = []
  num_tests = 0
  test_id = 0
  for s in suites:
    s.ReadStatusFile(variables)
    s.ReadTestCases(ctx)
    if len(args) > 0:
      s.FilterTestCasesByArgs(args)
    all_tests += s.tests
    s.FilterTestCasesByStatus(options.warn_unused, options.flaky_tests,
                              options.slow_tests, options.pass_fail_tests)
    if options.cat:
      verbose.PrintTestSource(s.tests)
      continue
    # Expand each test into one copy per selected variant, then shard and
    # assign globally unique test ids.
    variant_flags = [VARIANT_FLAGS[var] for var in VARIANTS]
    s.tests = [ t.CopyAddingFlags(v)
                for t in s.tests
                for v in s.VariantFlags(t, variant_flags) ]
    s.tests = ShardTests(s.tests, options.shard_count, options.shard_run)
    num_tests += len(s.tests)
    for t in s.tests:
      t.id = test_id
      test_id += 1
  if options.cat:
    return 0  # We're done here.
  if options.report:
    verbose.PrintReport(all_tests)
  if num_tests == 0:
    print "No tests to run."
    return 0
  # Run the tests, either locally or distributed on the network.
  start_time = time.time()
  progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
  if options.junitout:
    progress_indicator = progress.JUnitTestProgressIndicator(
        progress_indicator, options.junitout, options.junittestsuite)
  if options.json_test_results:
    progress_indicator = progress.JsonTestProgressIndicator(
        progress_indicator, options.json_test_results, arch, mode)
  # Decide whether distributed execution is possible; any of the checks
  # below falls back to local execution.
  run_networked = not options.no_network
  if not run_networked:
    print("Network distribution disabled, running tests locally.")
  elif utils.GuessOS() != "linux":
    print("Network distribution is only supported on Linux, sorry!")
    run_networked = False
  peers = []
  if run_networked:
    peers = network_execution.GetPeers()
    if not peers:
      print("No connection to distribution server; running tests locally.")
      run_networked = False
    elif len(peers) == 1:
      print("No other peers on the network; running tests locally.")
      run_networked = False
    elif num_tests <= 100:
      print("Less than 100 tests, running them locally.")
      run_networked = False
  if run_networked:
    runner = network_execution.NetworkedRunner(suites, progress_indicator,
                                               ctx, peers, workspace)
  else:
    runner = execution.Runner(suites, progress_indicator, ctx)
  exit_code = runner.Run(options.j)
  overall_duration = time.time() - start_time
  if options.time:
    verbose.PrintTestDurations(suites, overall_duration)
  return exit_code
if __name__ == "__main__":
  # Exit with the combined presubmit/test-run status (0 = success).
  sys.exit(Main())
| 38.625969
| 80
| 0.633285
|
794c8676a6d1638cb7b71e62ba7418e4dd0f7919
| 45,440
|
py
|
Python
|
wildlifecompliance/components/users/api.py
|
mintcoding/wildlifecompliance
|
28f5bb4ce3116fb62d836a39612c72a052e54ae1
|
[
"Apache-2.0"
] | null | null | null |
wildlifecompliance/components/users/api.py
|
mintcoding/wildlifecompliance
|
28f5bb4ce3116fb62d836a39612c72a052e54ae1
|
[
"Apache-2.0"
] | 3
|
2020-03-12T00:45:31.000Z
|
2022-03-02T10:37:23.000Z
|
wildlifecompliance/components/users/api.py
|
mintcoding/wildlifecompliance
|
28f5bb4ce3116fb62d836a39612c72a052e54ae1
|
[
"Apache-2.0"
] | null | null | null |
import re
import traceback
from django.db.models import Q
from django.db import transaction
from django.http import HttpResponse
from django.core.exceptions import ValidationError
from rest_framework import viewsets, serializers, views, status
#from rest_framework.decorators import detail_route, list_route
from rest_framework.response import Response
from rest_framework.renderers import JSONRenderer
from ledger.accounts.models import EmailUser, Address, Profile, EmailIdentity, EmailUserAction
from django.contrib.auth.models import Permission, ContentType
from datetime import datetime
from django_countries import countries
from wildlifecompliance.components.applications.models import Application
from wildlifecompliance.components.applications.email import send_id_updated_notification
from wildlifecompliance.components.call_email.serializers import SaveEmailUserSerializer, SaveUserAddressSerializer
from wildlifecompliance.components.organisations.models import (
OrganisationRequest, Organisation
)
from wildlifecompliance.components.users.models import (
CompliancePermissionGroup,
RegionDistrict,
ComplianceManagementUserPreferences,
)
from wildlifecompliance.helpers import is_customer, is_internal, is_compliance_management_callemail_readonly_user
from wildlifecompliance.components.users.serializers import (
UserSerializer,
DTUserSerializer,
UserProfileSerializer,
UserAddressSerializer,
PersonalSerializer,
ContactSerializer,
EmailIdentitySerializer,
EmailUserActionSerializer,
MyUserDetailsSerializer,
CompliancePermissionGroupSerializer,
RegionDistrictSerializer,
ComplianceUserDetailsSerializer,
CompliancePermissionGroupDetailedSerializer,
ComplianceUserDetailsOptimisedSerializer,
CompliancePermissionGroupMembersSerializer,
UpdateComplianceManagementUserPreferencesSerializer,
ComplianceManagementSaveUserSerializer,
ComplianceManagementUserSerializer,
ComplianceManagementSaveUserAddressSerializer,
FirstTimeUserSerializer,
)
from wildlifecompliance.components.organisations.serializers import (
OrganisationRequestDTSerializer,
)
from rest_framework_datatables.pagination import DatatablesPageNumberPagination
from rest_framework_datatables.filters import DatatablesFilterBackend
from rest_framework_datatables.renderers import DatatablesRenderer
from django.urls import reverse
from django.shortcuts import render, redirect, get_object_or_404
from rest_framework.decorators import (
detail_route,
list_route,
renderer_classes,
parser_classes,
api_view
)
from django.core.cache import cache
from wildlifecompliance.components.main.process_document import process_generic_document
#from wildlifecompliance.components.main.utils import retrieve_department_users
def generate_dummy_email(first_name, last_name):
    """Return a normalised placeholder email address for the given name.

    Builds a ledger EmailUser dummy email, lowercases it, strips
    surrounding whitespace and dots, collapses runs of dots and replaces
    runs of whitespace with underscores.
    """
    dummy_user = EmailUser(first_name=first_name, last_name=last_name)
    address = dummy_user.get_dummy_email().strip().strip('.').lower()
    address = re.sub(r'\.+', '.', address)
    return re.sub(r'\s+', '_', address)
class IsComplianceManagementCallEmailReadonlyUser(views.APIView):
    """Report whether the requester is a CallEmail read-only user."""
    renderer_classes = [JSONRenderer,]

    def get(self, request, format=None):
        is_readonly = bool(
            is_compliance_management_callemail_readonly_user(request))
        return Response(
            {"compliance_management_callemail_readonly_user": is_readonly})
class GetCountries(views.APIView):
    """Return the django-countries list as [{"name", "code"}, ...]."""
    renderer_classes = [JSONRenderer,]

    def get(self, request, format=None):
        return Response(
            [{"name": country.name, "code": country.code}
             for country in countries])
#class DepartmentUserList(views.APIView):
# renderer_classes = [JSONRenderer,]
# def get(self, request, format=None):
# data = cache.get('department_users')
# if not data:
# retrieve_department_users()
# data = cache.get('department_users')
# return Response(data)
#
# #serializer = UserSerializer(request.user)
class GetMyUserDetails(views.APIView):
    """Return the serialized details of the requesting user."""
    renderer_classes = [JSONRenderer, ]

    def get(self, request, format=None):
        data = MyUserDetailsSerializer(
            request.user, context={'request': request}).data
        return Response(data)
class GetComplianceUserDetails(views.APIView):
    """Return compliance user details plus group-permission summary."""
    renderer_classes = [JSONRenderer, ]

    def get(self, request, format=None):
        returned_data = ComplianceUserDetailsSerializer(
            request.user, context={'request': request}).data
        user_id = returned_data.get('id')
        if user_id:
            user = EmailUser.objects.get(id=user_id)
            # Codenames of every permission granted via the user's groups
            # (duplicates across groups are kept, as before).
            compliance_permissions = [
                permission.codename
                for group in user.groups.all()
                for permission in group.permissions.all()
            ]
            returned_data['base_compliance_permissions'] = compliance_permissions
            returned_data['is_volunteer'] = 'volunteer' in compliance_permissions
        return Response(returned_data)
class GetUser(views.APIView):
    """Return the requesting user's personal details."""
    renderer_classes = [JSONRenderer, ]

    def get(self, request, format=None):
        return Response(PersonalSerializer(request.user).data)
class IsNewUser(views.APIView):
    """Report whether the current session is flagged as a new user."""

    def get(self, request, format=None):
        # Dict-style access with an explicit default replaces the previous
        # broad try/except BaseException swallow; with SessionMiddleware
        # installed request.session always exists, so behaviour is the same.
        return HttpResponse(request.session.get('is_new', 'False'))
class UserProfileCompleted(views.APIView):
    """Clear the first-login flags on the current session."""

    def get(self, request, format=None):
        for flag in ('new_to_wildlifecompliance', 'is_new'):
            request.session[flag] = False
        return HttpResponse('OK')
class ProfileViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for ledger Profiles with role-based visibility."""
    queryset = Profile.objects.all()
    serializer_class = UserProfileSerializer

    def get_queryset(self):
        # Internal staff see everything; customers only their own profiles.
        if is_internal(self.request):
            return Profile.objects.all()
        if is_customer(self.request):
            return Profile.objects.filter(user=self.request.user)
        return Profile.objects.none()

    @detail_route(methods=['POST', ])
    def update_profile(self, request, *args, **kwargs):
        """Validate and persist profile changes; return the owning user."""
        try:
            profile = self.get_object()
            profile_serializer = UserProfileSerializer(
                profile, data=request.data)
            profile_serializer.is_valid(raise_exception=True)
            saved = profile_serializer.save()
            return Response(UserSerializer(saved).data)
        except serializers.ValidationError:
            print(traceback.print_exc())
            raise
        except ValidationError as e:
            print(traceback.print_exc())
            raise serializers.ValidationError(repr(e.error_dict))
        except Exception as e:
            print(traceback.print_exc())
            raise serializers.ValidationError(str(e))
class MyProfilesViewSet(viewsets.ModelViewSet):
    """Profiles belonging to the requesting user only."""
    queryset = Profile.objects.all()
    serializer_class = UserProfileSerializer

    def get_queryset(self):
        return self.queryset.filter(user=self.request.user)
class UserFilterBackend(DatatablesFilterBackend):
    """Datatables filter backend with extra EmailUser-specific filters."""

    def filter_queryset(self, request, queryset, view):
        """Apply character_flagged / dob filters, then the base filters."""
        flagged = request.GET.get('character_flagged')
        dob = request.GET.get('dob')
        if queryset.model is EmailUser:
            # 'all' (the default when the parameter is absent) disables
            # the character_flagged filter.
            if (flagged if flagged else 'all').lower() != 'all':
                queryset = queryset.filter(character_flagged=flagged)
            if dob:
                queryset = queryset.filter(
                    dob=datetime.strptime(dob, '%Y-%m-%d').date())
        queryset = super(UserFilterBackend, self).filter_queryset(
            request, queryset, view).distinct()
        # Expose the post-filter row count for UserRenderer.
        setattr(view, '_datatables_total_count', queryset.count())
        return queryset
class UserRenderer(DatatablesRenderer):
    """Datatables renderer exposing the backend's filtered total count."""

    def render(self, data, accepted_media_type=None, renderer_context=None):
        view = renderer_context['view'] if 'view' in renderer_context else None
        if view is not None and hasattr(view, '_datatables_total_count'):
            data['recordsTotal'] = view._datatables_total_count
        return super(UserRenderer, self).render(
            data, accepted_media_type, renderer_context)
class UserPaginatedViewSet(viewsets.ModelViewSet):
    """Datatables-paginated listing of EmailUsers (internal users only)."""
    filter_backends = (UserFilterBackend,)
    pagination_class = DatatablesPageNumberPagination
    renderer_classes = (UserRenderer,)
    queryset = EmailUser.objects.none()
    serializer_class = DTUserSerializer
    page_size = 10

    def get_queryset(self):
        if not is_internal(self.request):
            return EmailUser.objects.none()
        return EmailUser.objects.all()

    @list_route(methods=['GET', ])
    def datatable_list(self, request, *args, **kwargs):
        """Serve the complete filtered user list in datatables format."""
        self.serializer_class = DTUserSerializer
        filtered = self.filter_queryset(self.get_queryset())
        # One page holding every row: paging is effectively disabled here.
        self.paginator.page_size = filtered.count()
        page = self.paginator.paginate_queryset(filtered, request)
        serializer = DTUserSerializer(
            page, context={'request': request}, many=True)
        return self.paginator.get_paginated_response(serializer.data)
class UserViewSet(viewsets.ModelViewSet):
queryset = EmailUser.objects.all()
serializer_class = UserSerializer
def get_queryset(self):
"""
Optionally restrict the query if the following parameters are in the URL:
- first_name
- last_name
- dob
- email
"""
user = self.request.user
if is_internal(self.request):
queryset = EmailUser.objects.all()
elif is_customer(self.request):
queryset = EmailUser.objects.filter(id=user.id)
else:
queryset = EmailUser.objects.none()
first_name = self.request.query_params.get('first_name', None)
last_name = self.request.query_params.get('last_name', None)
dob = self.request.query_params.get('dob', None)
email = self.request.query_params.get('email', None)
if first_name is not None:
queryset = queryset.filter(first_name__iexact=first_name)
if last_name is not None:
queryset = queryset.filter(last_name__iexact=last_name)
if email is not None:
queryset = queryset.filter(email__iexact=email)
if dob is not None and dob is not u'':
queryset = queryset.filter(dob=dob)
return queryset
@detail_route(methods=['GET'])
@renderer_classes((JSONRenderer,))
def get_intelligence_text(self, request, *args, **kwargs):
try:
instance = self.get_object()
intelligence_text = ""
preference_qs = ComplianceManagementUserPreferences.objects.filter(email_user=instance)
if preference_qs:
intelligence_text = preference_qs[0].intelligence_information_text
return Response({"intelligence_text": intelligence_text})
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
if hasattr(e, 'error_dict'):
raise serializers.ValidationError(repr(e.error_dict))
else:
# raise serializers.ValidationError(repr(e[0].encode('utf-8')))
raise serializers.ValidationError(repr(e[0]))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['POST'])
@renderer_classes((JSONRenderer,))
def save_intelligence_text(self, request, *args, **kwargs):
try:
instance = self.get_object()
intelligence_text = request.data.get('intelligence_text')
preference, created = ComplianceManagementUserPreferences.objects.get_or_create(email_user=instance)
preference.intelligence_information_text = intelligence_text
preference.save()
return Response()
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
if hasattr(e, 'error_dict'):
raise serializers.ValidationError(repr(e.error_dict))
else:
# raise serializers.ValidationError(repr(e[0].encode('utf-8')))
raise serializers.ValidationError(repr(e[0]))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['POST'])
@renderer_classes((JSONRenderer,))
def process_intelligence_document(self, request, *args, **kwargs):
try:
instance = self.get_object()
# process docs
returned_data = process_generic_document(request, instance, 'intelligence_document')
# delete Sanction Outcome if user cancels modal
action = request.data.get('action')
if action == 'cancel' and returned_data:
instance.status = 'discarded'
instance.save()
# return response
if returned_data:
return Response(returned_data)
else:
return Response()
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
if hasattr(e, 'error_dict'):
raise serializers.ValidationError(repr(e.error_dict))
else:
# raise serializers.ValidationError(repr(e[0].encode('utf-8')))
raise serializers.ValidationError(repr(e[0]))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['GET', ])
def action_log(self, request, *args, **kwargs):
try:
instance = self.get_object()
qs = instance.action_logs.all()
serializer = EmailUserActionSerializer(qs, many=True)
return Response(serializer.data)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
print(traceback.print_exc())
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['GET', ])
def profiles(self, request, *args, **kwargs):
try:
instance = self.get_object()
serializer = UserProfileSerializer(
instance.profiles.all(), many=True)
return Response(serializer.data)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
print(traceback.print_exc())
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
    @detail_route(methods=['POST', ])
    def update_personal(self, request, *args, **kwargs):
        """Update the user's personal details and audit-log the change.

        Save and log run in one transaction so a failed log entry rolls
        the update back.
        """
        try:
            instance = self.get_object()
            serializer = PersonalSerializer(instance, data=request.data)
            serializer.is_valid(raise_exception=True)
            with transaction.atomic():
                instance = serializer.save()
                instance.log_user_action(
                    EmailUserAction.ACTION_PERSONAL_DETAILS_UPDATE.format(
                        '{} {} ({})'.format(
                            instance.first_name,
                            instance.last_name,
                            instance.email)),
                    request)
            serializer = UserSerializer(instance)
            return Response(serializer.data)
        except serializers.ValidationError:
            print(traceback.print_exc())
            raise
        except ValidationError as e:
            print(traceback.print_exc())
            raise serializers.ValidationError(repr(e.error_dict))
        except Exception as e:
            print(traceback.print_exc())
            raise serializers.ValidationError(str(e))
    @detail_route(methods=['POST', ])
    def update_contact(self, request, *args, **kwargs):
        """Update the user's contact details and audit-log the change.

        The whole fetch/validate/save/log sequence runs inside one
        transaction; the response uses the first-time-user serializer.
        """
        try:
            with transaction.atomic():
                instance = self.get_object()
                serializer = ContactSerializer(instance, data=request.data)
                serializer.is_valid(raise_exception=True)
                instance = serializer.save()
                instance.log_user_action(
                    EmailUserAction.ACTION_CONTACT_DETAILS_UPDATE.format(
                        '{} {} ({})'.format(
                            instance.first_name,
                            instance.last_name,
                            instance.email)),
                    request)
                serializer = FirstTimeUserSerializer(
                    instance, context={'request': request}
                )
                return Response(serializer.data)
        except serializers.ValidationError:
            print(traceback.print_exc())
            raise
        except ValidationError as e:
            print(traceback.print_exc())
            raise serializers.ValidationError(repr(e.error_dict))
        except Exception as e:
            print(traceback.print_exc())
            raise serializers.ValidationError(str(e))
    @detail_route(methods=['POST', ])
    def update_address(self, request, *args, **kwargs):
        """Set the user's residential address and audit-log the change.

        The Address is looked up/created on locality/state/country/postcode
        only — line1 is deliberately excluded from the lookup (see the
        commented line) and overwritten afterwards, so a street change
        updates the existing row rather than creating a duplicate.
        """
        try:
            instance = self.get_object()
            serializer = UserAddressSerializer(data=request.data)
            serializer.is_valid(raise_exception=True)
            address, created = Address.objects.get_or_create(
                # line1=serializer.validated_data['line1'],
                locality=serializer.validated_data['locality'],
                state=serializer.validated_data['state'],
                country=serializer.validated_data['country'],
                postcode=serializer.validated_data['postcode'],
                user=instance
            )
            address.line1 = serializer.validated_data['line1']
            instance.residential_address = address
            # Persist address, user and audit log atomically.
            with transaction.atomic():
                address.save()
                instance.save()
                instance.log_user_action(
                    EmailUserAction.ACTION_POSTAL_ADDRESS_UPDATE.format(
                        '{} {} ({})'.format(
                            instance.first_name,
                            instance.last_name,
                            instance.email)),
                    request)
                serializer = FirstTimeUserSerializer(
                    instance, context={'request': request}
                )
                return Response(serializer.data)
        except serializers.ValidationError:
            print(traceback.print_exc())
            raise
        except ValidationError as e:
            print(traceback.print_exc())
            raise serializers.ValidationError(repr(e.error_dict))
        except Exception as e:
            print(traceback.print_exc())
            raise serializers.ValidationError(str(e))
    @detail_route(methods=['POST', ])
    def upload_id(self, request, *args, **kwargs):
        """Attach an identification document to the user.

        Saves the user and audit log atomically, then notifies officers of
        any of the user's own open applications awaiting an ID update.
        """
        # Local import — presumably avoids an import cycle with the
        # management package; TODO confirm before moving to module level.
        from wildlifecompliance.management.securebase_manager import (
            SecureBaseUtils
        )
        try:
            instance = self.get_object()
            SecureBaseUtils.timestamp_id_request(request)
            instance.upload_identification(request)
            with transaction.atomic():
                instance.save()
                instance.log_user_action(
                    EmailUserAction.ACTION_ID_UPDATE.format(
                        '{} {} ({})'.format(
                            instance.first_name,
                            instance.last_name,
                            instance.email)),
                    request)
                # For any of the submitter's applications that have requested ID update,
                # email the assigned officer
                applications = instance.wildlifecompliance_applications.filter(
                    submitter=instance,
                    id_check_status=Application.ID_CHECK_STATUS_AWAITING_UPDATE,
                    org_applicant=None,
                    proxy_applicant=None
                ).exclude(customer_status__in=(
                    Application.CUSTOMER_STATUS_ACCEPTED,
                    Application.CUSTOMER_STATUS_DECLINED)
                ).order_by('id')
                if applications:
                    # Notify either the single assigned officer or all
                    # licence officers of the oldest matching application.
                    officers = applications[0].licence_officers
                    if applications[0].is_assigned:
                        officers = [applications[0].assigned_officer]
                    send_id_updated_notification(
                        instance, applications, officers, request
                    )
                # assigned_officers = [application.assigned_officer.email
                #                      for application
                #                      in applications
                #                      if application.assigned_officer]
                # remove duplicate email addresses from assigned_officers list
                # assigned_officers = list(dict.fromkeys(assigned_officers))
                # if len(assigned_officers) > 0:
                #     send_id_updated_notification(instance, applications, assigned_officers, request)
            serializer = UserSerializer(instance, partial=True)
            return Response(serializer.data)
        except serializers.ValidationError:
            print(traceback.print_exc())
            raise
        except ValidationError as e:
            print(traceback.print_exc())
            raise serializers.ValidationError(repr(e.error_dict))
        except Exception as e:
            print(traceback.print_exc())
            raise serializers.ValidationError(str(e))
@detail_route(methods=['GET', ])
def pending_org_requests(self, request, *args, **kwargs):
try:
instance = self.get_object()
serializer = OrganisationRequestDTSerializer(
instance.organisationrequest_set.filter(
status=OrganisationRequest.ORG_REQUEST_STATUS_WITH_ASSESSOR),
many=True,
context={'request': request})
return Response(serializer.data)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
print(traceback.print_exc())
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
    @list_route(methods=['POST', ])
    def create_new_person(self, request, *args, **kwargs):
        """Create (or update, when an id is posted) a person record.

        Generates a placeholder email when none is supplied, saves the
        EmailUser, saves the posted residential address, then re-saves the
        user with the address linked.  The whole sequence runs in a single
        transaction; validation errors roll everything back.
        """
        print("create_new_person")
        with transaction.atomic():
            try:
                email_user_id_requested = request.data.get('id', {})
                email_address = request.data.get('email', '')
                if not email_address:
                    # No email supplied: synthesise a placeholder from the
                    # person's name.
                    first_name = request.data.get('first_name', '')
                    last_name = request.data.get('last_name', '')
                    email_address = generate_dummy_email(first_name, last_name)
                if email_user_id_requested:
                    # An id means "update this existing person".
                    email_user_instance = EmailUser.objects.get(id=email_user_id_requested)
                    email_user_instance.email = email_address
                else:
                    email_user_instance = EmailUser.objects.create_user(email_address, '')
                request.data.update({'email': email_address})
                email_user_serializer = SaveEmailUserSerializer(
                    email_user_instance,
                    data=request.data,
                    partial=True)
                if email_user_serializer.is_valid(raise_exception=True):
                    email_user_serializer.save()
                # Residential address
                # UPDATE user_id of residential address in order to save the residential address
                request.data['residential_address'].update({'user_id': email_user_serializer.data['id']})
                residential_address_id_requested = request.data.get('residential_address', {}).get('id', {})
                if residential_address_id_requested:
                    residential_address_instance = Address.objects.get(id=residential_address_id_requested)
                    address_serializer = SaveUserAddressSerializer(
                        instance=residential_address_instance,
                        data=request.data['residential_address'],
                        partial=True)
                else:
                    address_serializer = SaveUserAddressSerializer(
                        data=request.data['residential_address'],
                        partial=True)
                if address_serializer.is_valid(raise_exception=True):
                    address_serializer.save()
                    # Update relation between email_user and residential_address
                    request.data.update({'residential_address_id': address_serializer.data['id']})
                    email_user = EmailUser.objects.get(id=email_user_serializer.instance.id)
                    email_user_serializer = SaveEmailUserSerializer(email_user, request.data)
                    if email_user_serializer.is_valid():
                        email_user_serializer.save()
            except serializers.ValidationError:
                print(traceback.print_exc())
                raise
            except ValidationError as e:
                print(traceback.print_exc())
                raise serializers.ValidationError(repr(e.error_dict))
            except Exception as e:
                print(traceback.print_exc())
                raise serializers.ValidationError(str(e))
        # Re-fetch so the response reflects the fully linked record.
        email_user = EmailUser.objects.get(id=email_user_serializer.instance.id)
        email_user_serializer = UserSerializer(email_user,)
        return Response(
            email_user_serializer.data,
            status=status.HTTP_201_CREATED,
            headers=self.get_success_headers(email_user_serializer.data)
        )
    @detail_route(methods=['POST', ])
    def update_system_preference(self, request, *args, **kwargs):
        """Persist the user's preferred landing system and redirect home.

        POST body: prefer_compliance_management -- truthy when the user
        wants the compliance-management UI by default.
        """
        with transaction.atomic():
            try:
                prefer_compliance_management = request.data.get('prefer_compliance_management', False)
                user_instance = self.get_object()
                # Assumes a ComplianceManagementUserPreferences row already
                # exists for this user; raises DoesNotExist otherwise --
                # TODO confirm that is intended.
                system_preference_instance = ComplianceManagementUserPreferences.objects.get(email_user_id=user_instance.id)
                serializer = UpdateComplianceManagementUserPreferencesSerializer(
                    system_preference_instance,
                    data={
                        'email_user_id': user_instance.id,
                        'prefer_compliance_management': prefer_compliance_management
                    }
                )
                serializer.is_valid(raise_exception=True)
                serializer.save()
                # Send the browser back to the root so the preference
                # takes effect immediately.
                return redirect('/')
            except serializers.ValidationError:
                print(traceback.print_exc())
                raise
            except ValidationError as e:
                print(traceback.print_exc())
                raise serializers.ValidationError(repr(e.error_dict))
            except Exception as e:
                print(traceback.print_exc())
                raise serializers.ValidationError(str(e))
class ComplianceManagementUserViewSet(viewsets.ModelViewSet):
    """User endpoints used by the compliance-management frontend.

    Adds create/update flows that also manage the user's residential
    address inside the same transaction.
    """
    queryset = EmailUser.objects.all()
    serializer_class = UserSerializer
    renderer_classes = [JSONRenderer, ]

    def get_queryset(self):
        """
        Optionally restrict the query if the following parameters are in the URL:
        - first_name
        - last_name
        - dob
        - email
        """
        user = self.request.user
        # Internal staff see everyone; customers only themselves.
        if is_internal(self.request):
            queryset = EmailUser.objects.all()
        elif is_customer(self.request):
            queryset = EmailUser.objects.filter(id=user.id)
        else:
            queryset = EmailUser.objects.none()
        first_name = self.request.query_params.get('first_name', None)
        last_name = self.request.query_params.get('last_name', None)
        dob = self.request.query_params.get('dob', None)
        email = self.request.query_params.get('email', None)
        if first_name is not None:
            queryset = queryset.filter(first_name__iexact=first_name)
        if last_name is not None:
            queryset = queryset.filter(last_name__iexact=last_name)
        if email is not None:
            queryset = queryset.filter(email__iexact=email)
        # NOTE(review): "is not u''" is an identity test, not equality;
        # it depends on string interning and should probably be dob != ''.
        if dob is not None and dob is not u'':
            queryset = queryset.filter(dob=dob)
        return queryset

    def create(self, request, *args, **kwargs):
        """Create an EmailUser (with a generated placeholder email when
        none is supplied) and delegate field/address handling to
        update_person, returning its response."""
        print("cm user create")
        print(request.data)
        with transaction.atomic():
            try:
                request_data = request.data
                email_address = request.data.get('email', '')
                if not email_address:
                    first_name = request.data.get('first_name', '')
                    last_name = request.data.get('last_name', '')
                    email_address = generate_dummy_email(first_name, last_name)
                    request_data.update({'email': email_address})
                email_user_instance = EmailUser.objects.create_user(email_address, '')
                res = self.update_person(request, instance=email_user_instance)
                return res
                #print("user_serializer_data")
                #print(type(user_serializer_data))
                #print(user_serializer_data)
                # return Response(
                #         user_serializer_data,
                #         status=status.HTTP_201_CREATED,
                #         #headers=self.get_success_headers(user_serializer.data)
                #         )
            except serializers.ValidationError:
                print(traceback.print_exc())
                raise
            except ValidationError as e:
                print(traceback.print_exc())
                raise serializers.ValidationError(repr(e.error_dict))
            except Exception as e:
                print(traceback.print_exc())
                raise serializers.ValidationError(str(e))

    @detail_route(methods=['POST', ])
    #@renderer_classes((JSONRenderer,))
    def update_person(self, request, instance=None, *args, **kwargs):
        """Update a person's fields and residential address.

        Called directly by create() with a freshly-made instance, or as a
        detail route (instance resolved from the URL).  Saves/creates the
        residential address first so its id can be linked on the user.
        """
        print("cm user update")
        print(request.data)
        with transaction.atomic():
            try:
                if not instance:
                    instance = self.get_object()
                request_data = request.data
                email_address = request.data.get('email', '')
                if not email_address:
                    first_name = request.data.get('first_name', '')
                    last_name = request.data.get('last_name', '')
                    email_address = generate_dummy_email(first_name, last_name)
                    request_data.update({'email': email_address})
                residential_address_data = request_data.get('residential_address')
                # residential address
                if instance.residential_address:
                    print("update existing address")
                    #residential_address_instance = Address.objects.get(
                    #        id=instance.residential_address.id)
                    address_serializer = ComplianceManagementSaveUserAddressSerializer(
                            instance.residential_address,
                            data=residential_address_data)
                    address_serializer.is_valid(raise_exception=True)
                    # NOTE(review): "if address_serializer.is_valid" tests the
                    # bound method object (always truthy), not its result;
                    # harmless here only because is_valid() already raised.
                    if address_serializer.is_valid:
                        saved_address = address_serializer.save()
                        request_data.update({'residential_address_id': saved_address.id})
                elif residential_address_data:
                    print("create address")
                    print(residential_address_data)
                    residential_address_data.update({'user_id': instance.id})
                    address_serializer = ComplianceManagementSaveUserAddressSerializer(
                            data=residential_address_data)
                    address_serializer.is_valid(raise_exception=True)
                    if address_serializer.is_valid:
                        saved_address = address_serializer.save()
                        print("saved_address")
                        print(saved_address)
                        request_data.update({'residential_address_id': saved_address.id})
                # now save EmailUser with residential_address_id, if it exists
                print("request_data")
                print(request_data)
                user_serializer = ComplianceManagementSaveUserSerializer(
                        instance=instance,
                        data=request_data)
                user_serializer.is_valid(raise_exception=True)
                if user_serializer.is_valid:
                    saved_email_user = user_serializer.save()
                    email_user_refresh = EmailUser.objects.get(id=saved_email_user.id)
                    #return_serializer = UserSerializer(instance=instance)
                    print("email_user_refresh.residential_address")
                    print(email_user_refresh.residential_address)
                    return_serializer = ComplianceManagementUserSerializer(instance=email_user_refresh)
                    return Response(
                            return_serializer.data,
                            status=status.HTTP_201_CREATED,
                            headers=self.get_success_headers(user_serializer.data)
                            )
            except serializers.ValidationError:
                print(traceback.print_exc())
                raise
            except ValidationError as e:
                print(traceback.print_exc())
                raise serializers.ValidationError(repr(e.error_dict))
            except Exception as e:
                print(traceback.print_exc())
                raise serializers.ValidationError(str(e))
class EmailIdentityViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for email identities, filterable by email address."""
    queryset = EmailIdentity.objects.all()
    serializer_class = EmailIdentitySerializer

    def get_queryset(self):
        """Restrict identities to the caller's own unless internal.

        Supported query parameters:
          - email: case-insensitive exact email match
          - exclude_user: drop identities belonging to this user id
        """
        requester = self.request.user
        if is_internal(self.request):
            qs = EmailIdentity.objects.all()
        elif is_customer(self.request):
            qs = requester.emailidentity_set.all()
        else:
            qs = EmailIdentity.objects.none()
        params = self.request.query_params
        email = params.get('email', None)
        exclude_user = params.get('exclude_user', None)
        if email is not None:
            qs = qs.filter(email__iexact=email)
        if exclude_user is not None:
            qs = qs.exclude(user=exclude_user)
        return qs
class CompliancePermissionGroupViewSet(viewsets.ModelViewSet):
    """Read-side endpoints for compliance permission groups."""
    queryset = CompliancePermissionGroup.objects.none()
    serializer_class = CompliancePermissionGroupSerializer
    renderer_classes = [JSONRenderer, ]

    def get_queryset(self):
        # Only internal users may list groups; everyone else gets an
        # empty queryset.
        if is_internal(self.request):
            return CompliancePermissionGroup.objects.all()
        elif is_customer(self.request):
            return CompliancePermissionGroup.objects.none()
        return CompliancePermissionGroup.objects.none()

    @list_route(methods=['GET', ])
    def get_officers(self, request, *args, **kwargs):
        """List every user in a group carrying the 'officer' permission."""
        try:
            officer_permissions = Permission.objects.filter(codename='officer')
            officer_groups = CompliancePermissionGroup.objects.filter(
                permissions__in=officer_permissions)
            officers = EmailUser.objects.filter(groups__in=officer_groups)
            serializer = ComplianceUserDetailsOptimisedSerializer(
                officers, many=True)
            return Response(serializer.data)
        except serializers.ValidationError:
            print(traceback.print_exc())
            raise
        except ValidationError as e:
            print(traceback.print_exc())
            raise serializers.ValidationError(repr(e.error_dict))
        except Exception as e:
            print(traceback.print_exc())
            raise serializers.ValidationError(str(e))

    @list_route(methods=['POST'])
    def get_users(self, request, *args, **kwargs):
        """Serialise the users whose ids are posted in 'user_list'."""
        try:
            requested_ids = request.data.get('user_list')
            matching_users = EmailUser.objects.filter(id__in=requested_ids)
            serializer = ComplianceUserDetailsOptimisedSerializer(
                matching_users, many=True)
            return Response(serializer.data)
        except serializers.ValidationError:
            print(traceback.print_exc())
            raise
        except ValidationError as e:
            print(traceback.print_exc())
            raise serializers.ValidationError(repr(e.error_dict))
        except Exception as e:
            print(traceback.print_exc())
            raise serializers.ValidationError(str(e))

    @list_route(methods=['GET', ])
    def get_detailed_list(self, request, *args, **kwargs):
        """Return every group using the detailed serialisation."""
        try:
            all_groups = CompliancePermissionGroup.objects.all()
            serializer = CompliancePermissionGroupDetailedSerializer(
                all_groups,
                many=True
            )
            return Response(serializer.data)
        except serializers.ValidationError:
            print(traceback.print_exc())
            raise
        except ValidationError as e:
            print(traceback.print_exc())
            raise serializers.ValidationError(repr(e.error_dict))
        except Exception as e:
            print(traceback.print_exc())
            raise serializers.ValidationError(str(e))
class RegionDistrictViewSet(viewsets.ModelViewSet):
    """Read endpoints for regions/districts and their compliance groups."""
    queryset = RegionDistrict.objects.all()
    serializer_class = RegionDistrictSerializer
    renderer_classes = [JSONRenderer, ]

    def get_queryset(self):
        # Only internal users may browse regions/districts.
        if is_internal(self.request):
            return RegionDistrict.objects.all()
        return RegionDistrict.objects.none()

    @list_route(methods=['GET', ])
    def get_regions(self, request, *args, **kwargs):
        """Return top-level regions (records with no parent region)."""
        try:
            serializer = RegionDistrictSerializer(
                RegionDistrict.objects.filter(region=None),
                many=True
            )
            return Response(serializer.data)
        except serializers.ValidationError:
            # traceback.print_exc() already prints; wrapping it in print()
            # (as elsewhere in this file) just prints a literal "None".
            traceback.print_exc()
            raise
        except ValidationError as e:
            traceback.print_exc()
            raise serializers.ValidationError(repr(e.error_dict))
        except Exception as e:
            traceback.print_exc()
            raise serializers.ValidationError(str(e))

    @detail_route(methods=['GET', ])
    def get_region_districts(self, request, *args, **kwargs):
        """Return the districts belonging to this region."""
        try:
            instance = self.get_object()
            serializer = RegionDistrictSerializer(
                instance.districts.all(), many=True)
            return Response(serializer.data)
        except serializers.ValidationError:
            traceback.print_exc()
            raise
        except ValidationError as e:
            traceback.print_exc()
            raise serializers.ValidationError(repr(e.error_dict))
        except Exception as e:
            traceback.print_exc()
            raise serializers.ValidationError(str(e))

    @detail_route(methods=['POST', ])
    def get_compliance_group_by_region_district(self, request, *args, **kwargs):
        """Return the compliance group (and its members) for this
        region/district that holds the requested permission.

        POST body: group_permission -- the permission codename to look up.
        Raises a ValidationError when no matching group exists.
        """
        try:
            instance = self.get_object()
            group_permission = request.data.get('group_permission')
            compliance_content_type = ContentType.objects.get(
                model="compliancepermissiongroup")
            permission = Permission.objects.filter(
                codename=group_permission
            ).filter(content_type_id=compliance_content_type.id).first()
            group = CompliancePermissionGroup.objects.filter(
                region_district=instance
            ).filter(permissions=permission).first()
            # BUGFIX: .first() returns None when nothing matches; the old
            # code then crashed with AttributeError on group.id. Fail with
            # a clear, catchable validation error instead.
            if group is None:
                raise serializers.ValidationError(
                    'No compliance group with permission {} exists for this '
                    'region/district'.format(group_permission))
            # Leading blank entry lets the client render an
            # "unassigned" option in its member dropdown.
            allocated_group = [{
                'email': '',
                'first_name': '',
                'full_name': '',
                'id': None,
                'last_name': '',
                'title': '',
            }]
            serializer = CompliancePermissionGroupMembersSerializer(
                instance=group)
            for member in serializer.data['members']:
                allocated_group.append(member)
            return Response(data={
                'allocated_group': allocated_group,
                'group_id': group.id})
        except serializers.ValidationError:
            traceback.print_exc()
            raise
        except ValidationError as e:
            traceback.print_exc()
            raise serializers.ValidationError(repr(e.error_dict))
        except Exception as e:
            traceback.print_exc()
            raise serializers.ValidationError(str(e))
class GetPersonOrg(views.APIView):
    """Type-ahead lookup returning people and organisations matching a term."""
    renderer_classes = [JSONRenderer, ]

    def get(self, request, format=None):
        term = request.GET.get('term', '')
        if not term:
            return Response()
        results = []
        # Up to 10 matching users on name or email.
        matching_users = EmailUser.objects.filter(
            Q(first_name__icontains=term) |
            Q(last_name__icontains=term) |
            Q(email__icontains=term)
        )[:10]
        for person in matching_users:
            if person.dob:
                label = '{} {} (DOB: {})'.format(
                    person.first_name, person.last_name, person.dob)
            else:
                label = '{} {}'.format(person.first_name, person.last_name)
            results.append({
                'text': label,
                'entity_type': 'user',
                'id': person.id,
            })
        # Up to 10 matching organisations on name, ABN or trading name.
        matching_orgs = Organisation.objects.filter(
            Q(organisation__name__icontains=term) |
            Q(organisation__abn__icontains=term) |
            Q(organisation__trading_name__icontains=term)
        )[:10]
        for org in matching_orgs:
            results.append({
                'text': '{} (ABN: {})'.format(org.name, org.abn),
                'entity_type': 'org',
                'id': org.id,
            })
        # order results
        # NOTE: user ids and organisation ids share one sort key here.
        results.sort(key=lambda item: item.get("id"))
        return Response({"results": results})
class StaffMemberLookup(views.APIView):
    """Type-ahead lookup over staff users matching a search term."""
    renderer_classes = [JSONRenderer, ]

    def get(self, request, format=None):
        term = request.GET.get('term', '')
        if not term:
            return Response()
        results = []
        # Staff-only: up to 10 matches on name or email.
        staff_matches = EmailUser.objects.filter(is_staff=True).filter(
            Q(first_name__icontains=term) |
            Q(last_name__icontains=term) |
            Q(email__icontains=term)
        )[:10]
        for member in staff_matches:
            if member.dob:
                label = '{} {} (DOB: {})'.format(
                    member.first_name, member.last_name, member.dob)
            else:
                label = '{} {}'.format(member.first_name, member.last_name)
            results.append({
                'text': label,
                'id': member.id,
            })
        return Response({"results": results})
| 42.46729
| 163
| 0.613644
|
794c86a250389728f63be756a8bb8b3f3b33b42a
| 4,142
|
py
|
Python
|
tools/quota-monitoring-alerting/python/src/common/lib/pubsub_lib.py
|
ruchirjain86/professional-services
|
739ac0f5ffc8237f750804fa9f0f14d4d918a0fa
|
[
"Apache-2.0"
] | 2,116
|
2017-05-18T19:33:05.000Z
|
2022-03-31T13:34:48.000Z
|
tools/quota-monitoring-alerting/python/src/common/lib/pubsub_lib.py
|
ruchirjain86/professional-services
|
739ac0f5ffc8237f750804fa9f0f14d4d918a0fa
|
[
"Apache-2.0"
] | 548
|
2017-05-20T05:05:35.000Z
|
2022-03-28T16:38:12.000Z
|
tools/quota-monitoring-alerting/python/src/common/lib/pubsub_lib.py
|
ruchirjain86/professional-services
|
739ac0f5ffc8237f750804fa9f0f14d4d918a0fa
|
[
"Apache-2.0"
] | 1,095
|
2017-05-19T00:02:36.000Z
|
2022-03-31T05:21:39.000Z
|
# Copyright 2021 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions to interact with pubsub messaging."""
import base64
import logging
import simplejson as json
from src.common.lib import gcp
def _validate(envelope):
    """Validate pubsub message envelope.

    Raises:
        ValueError, if expected data is not present in pubsub envelope.
    """
    logging.info('Pubsub: Validating pubsub message')
    if 'message' not in envelope:
        raise ValueError('PubsubHelper: No message in envelope')
    message = envelope.get('message', {})
    # Both fields must be present for decoding to succeed later.
    for required in ('data', 'attributes'):
        if required not in message:
            raise ValueError('PubsubHelper: No %s in message' % required)
def _encode(message):
    """Encode data to make it publishable to pubsub.

    Args:
        message: dict, data that needs to be encoded.
    """
    # 'data' becomes UTF-8 encoded JSON bytes; each attribute value is
    # JSON-serialised and encoded the same way, in place.
    message['data'] = json.dumps(message.get('data', '')).encode('utf-8')
    encoded_attributes = {}
    for key, value in message.get('attributes', {}).items():
        encoded_attributes[key] = json.dumps(value).encode('utf-8')
    message['attributes'] = encoded_attributes
def _decode(message):
    """Decode pubsub message data.

    Args:
        message: dict, data that needs to be decoded.
    """
    # 'data' arrives base64-wrapped JSON (pubsub push format); attributes
    # are plain JSON strings. Both are decoded in place.
    message['data'] = json.loads(base64.b64decode(message['data']))
    decoded_attributes = {}
    for key, raw_value in message['attributes'].items():
        decoded_attributes[key] = json.loads(raw_value)
    message['attributes'] = decoded_attributes
def process_envelope(request_data):
    """Process pubsub envelope data.

    Args:
        request_data: pubsub request object.

    Returns:
        dict, the message from pubsub request object.
    """
    logging.info('PubsubHelper: Processing request data')
    envelope = json.loads(request_data.decode('utf-8'))
    _validate(envelope)
    message = envelope.get('message', {})
    return process_message(message)
def build_message(data, **metadata):
    """Build pubsub message obj for given input.

    Args:
        data: dict, data that needs to be published.
        metadata: dict, attributes for pubsub message.

    Returns:
        dict, encoded data.
    """
    # Only a fixed set of attribute keys is forwarded; anything else in
    # metadata is dropped. Note the 'publishTime' source key (camelCase).
    attributes = {
        'batch_id': metadata.get('batch_id', ''),
        'message_id': metadata.get('message_id', ''),
        'publish_time': metadata.get('publishTime', ''),
        'src_message_id': metadata.get('src_message_id', ''),
    }
    message = {'data': data, 'attributes': attributes}
    _encode(message)
    return message
def process_message(message):
    """Process pubsub message.

    Args:
        message: dict, encode data.

    Returns:
        dict, decoded data.
    """
    _decode(message)
    metadata = message['attributes']
    logging.info('PubsubHelper: message_id - %s and src_message_id - %s',
                 metadata.get('message_id', ''),
                 metadata.get('src_message_id', ''))
    return message['data'], metadata
def publish_message(pubsub_project, pubsub_topic, message):
    """Public message to a topic.

    Args:
        pubsub_project: str, project id.
        pubsub_topic: str, topic name.
        message: dict, data that needs to be published.

    Returns:
        result from publish request.
    """
    logging.info('PubsubHelper: Publishing message to - %s, %s',
                 pubsub_project, pubsub_topic)
    publisher = gcp.pubsub_client()
    # pylint:disable=no-member
    topic_path = publisher.topic_path(pubsub_project, pubsub_topic)
    # pylint:enable=no-member
    attributes = message['attributes']
    future = publisher.publish(topic_path, message['data'], **attributes)
    # Block until the publish completes (or raises).
    return future.result()
| 29.375887
| 75
| 0.649445
|
794c87427087aadbee2661e46f69add11eaa75ed
| 4,775
|
py
|
Python
|
app/eg005_envelope_recipients.py
|
AaronWDS/eg-03-python-auth-code-grant
|
6cb1694cbbb8cdbf166fda282e81886fd8380e5e
|
[
"MIT"
] | null | null | null |
app/eg005_envelope_recipients.py
|
AaronWDS/eg-03-python-auth-code-grant
|
6cb1694cbbb8cdbf166fda282e81886fd8380e5e
|
[
"MIT"
] | null | null | null |
app/eg005_envelope_recipients.py
|
AaronWDS/eg-03-python-auth-code-grant
|
6cb1694cbbb8cdbf166fda282e81886fd8380e5e
|
[
"MIT"
] | null | null | null |
"""005: List an envelope's recipients and status"""
from flask import render_template, url_for, redirect, session, flash, request
from os import path
import json
from app import ds_config, views
from docusign_esign import *
from docusign_esign.rest import ApiException
eg = "eg005" # reference (and url) for this example
def controller():
    """Controller router using the HTTP method"""
    handlers = {
        'GET': get_controller,
        'POST': create_controller,
    }
    handler = handlers.get(request.method)
    if handler is None:
        return render_template('404.html'), 404
    return handler()
def create_controller():
    """
    1. Check the token
    2. Call the worker method
    3. Show results

    Requires a valid DocuSign token (with at least 3 minutes left) and an
    envelope_id in the session; otherwise redirects to authentication or
    re-renders the form.
    """
    minimum_buffer_min = 3
    token_ok = views.ds_token_ok(minimum_buffer_min)
    if token_ok and 'envelope_id' in session:
        # 2. Call the worker method
        args = {
            'account_id': session['ds_account_id'],
            'envelope_id': session['envelope_id'],
            'base_path': session['ds_base_path'],
            'ds_access_token': session['ds_access_token'],
        }
        try:
            results = worker(args)
        except ApiException as err:
            error_body_json = err and hasattr(err, 'body') and err.body
            # we can pull the DocuSign error code and message from the response body
            error_body = json.loads(error_body_json)
            error_code = error_body and 'errorCode' in error_body and error_body['errorCode']
            error_message = error_body and 'message' in error_body and error_body['message']
            # In production, may want to provide customized error messages and
            # remediation advice to the user.
            return render_template('error.html',
                                   err=err,
                                   error_code=error_code,
                                   error_message=error_message
                                   )
        # Double-encoded JSON: the template embeds the payload as a string.
        return render_template("example_done.html",
                               title="Envelope recipients results",
                               h1="List the envelope's recipients and their status",
                               message="Results from the EnvelopesRecipients::list method:",
                               json=json.dumps(json.dumps(results.to_dict()))
                               )
    elif not token_ok:
        flash('Sorry, you need to re-authenticate.')
        # We could store the parameters of the requested operation
        # so it could be restarted automatically.
        # But since it should be rare to have a token issue here,
        # we'll make the user re-enter the form data after
        # authentication.
        session['eg'] = url_for(eg)
        return redirect(url_for('ds_must_authenticate'))
    elif not 'envelope_id' in session:
        # Valid token but no envelope yet: show the form with a hint.
        return render_template("eg005_envelope_recipients.html",
                               title="Envelope recipient information",
                               envelope_ok=False,
                               source_file=path.basename(__file__),
                               source_url=ds_config.DS_CONFIG['github_example_url'] + path.basename(__file__),
                               documentation=ds_config.DS_CONFIG['documentation'] + eg,
                               show_doc=ds_config.DS_CONFIG['documentation'],
                               )
# ***DS.snippet.0.start
def worker(args):
    """
    1. Call the envelope recipients list method
    """
    # Exceptions will be caught by the calling function
    client = ApiClient()
    client.host = args['base_path']
    auth_header = "Bearer " + args['ds_access_token']
    client.set_default_header("Authorization", auth_header)
    envelope_api = EnvelopesApi(client)
    return envelope_api.list_recipients(args['account_id'], args['envelope_id'])
# ***DS.snippet.0.end
def get_controller():
    """responds with the form for the example"""
    if not views.ds_token_ok():
        # Save the current operation so it will be resumed after authentication
        session['eg'] = url_for(eg)
        return redirect(url_for('ds_must_authenticate'))
    return render_template(
        "eg005_envelope_recipients.html",
        title="Envelope recipient information",
        envelope_ok='envelope_id' in session,
        source_file=path.basename(__file__),
        source_url=ds_config.DS_CONFIG['github_example_url'] + path.basename(__file__),
        documentation=ds_config.DS_CONFIG['documentation'] + eg,
        show_doc=ds_config.DS_CONFIG['documentation'],
    )
| 41.885965
| 110
| 0.588691
|
794c87cdb2b68febe98b9ab3c244e5202199ab05
| 5,950
|
py
|
Python
|
PyTrinamic/examples/evalboards/TMC4671/TMC4671_eval_BLDC_ABN_encoder_offset_estimation.py
|
trinamic-AA/PyTrinamic
|
b054f4baae8eb6d3f5d2574cf69c232f66abb4ee
|
[
"MIT"
] | 37
|
2019-01-13T11:08:45.000Z
|
2022-03-25T07:18:15.000Z
|
PyTrinamic/examples/evalboards/TMC4671/TMC4671_eval_BLDC_ABN_encoder_offset_estimation.py
|
AprDec/PyTrinamic
|
a9db10071f8fbeebafecb55c619e5893757dd0ce
|
[
"MIT"
] | 56
|
2019-02-25T02:48:27.000Z
|
2022-03-31T08:45:34.000Z
|
PyTrinamic/examples/evalboards/TMC4671/TMC4671_eval_BLDC_ABN_encoder_offset_estimation.py
|
AprDec/PyTrinamic
|
a9db10071f8fbeebafecb55c619e5893757dd0ce
|
[
"MIT"
] | 26
|
2019-01-14T05:20:16.000Z
|
2022-03-08T13:27:35.000Z
|
#!/usr/bin/env python3
'''
Created on 26.02.2019
@author: ED

Estimate the ABN encoder offset of a BLDC motor driven by a TMC4671:
1) base configuration, 2) align the rotor and spin open-loop to capture the
N-channel offset, 3) apply the offset, 4) switch to encoder-based torque
mode and 5) verify with a short test drive in both directions.
'''
import time
from PyTrinamic.connections.ConnectionManager import ConnectionManager
from PyTrinamic.evalboards.TMC4671_eval import TMC4671_eval
from PyTrinamic.ic.TMC4671.TMC4671 import TMC4671 as TMC4671_IC
from PyTrinamic.connections.uart_ic_interface import uart_ic_interface

connectionManager = ConnectionManager()
myInterface = connectionManager.connect()

if isinstance(myInterface, uart_ic_interface):
    # Create an TMC4671 IC class which communicates directly over UART
    TMC4671 = TMC4671_IC(myInterface)
else:
    # Create an TMC4671-Eval class which communicates over the Landungsbruecke via TMCL
    TMC4671 = TMC4671_eval(myInterface)

# read ChipInfo
# BUG FIX: showChipInfo is a method; the original line only referenced the
# attribute without calling it, so no chip info was ever printed.
TMC4671.showChipInfo()

# ===== 1) base configuration =====
polePairs = 4
encoderResolution = 4000  # ABN encoder pulses per mechanical revolution

# Motor type & PWM configuration
TMC4671.writeRegister(TMC4671.registers.MOTOR_TYPE_N_POLE_PAIRS, 0x00030000 | polePairs)
TMC4671.writeRegister(TMC4671.registers.PWM_POLARITIES, 0x00000000)
TMC4671.writeRegister(TMC4671.registers.PWM_MAXCNT, int(0x00000F9F))
TMC4671.writeRegister(TMC4671.registers.PWM_BBM_H_BBM_L, 0x00000505)
TMC4671.writeRegister(TMC4671.registers.PWM_SV_CHOP, 0x00000007)

# ADC configuration
TMC4671.writeRegister(TMC4671.registers.ADC_I_SELECT, 0x18000100)
TMC4671.writeRegister(TMC4671.registers.dsADC_MCFG_B_MCFG_A, 0x00100010)
TMC4671.writeRegister(TMC4671.registers.dsADC_MCLK_A, 0x20000000)
TMC4671.writeRegister(TMC4671.registers.dsADC_MCLK_B, 0x00000000)
TMC4671.writeRegister(TMC4671.registers.dsADC_MDEC_B_MDEC_A, int(0x014E014E))
TMC4671.writeRegister(TMC4671.registers.ADC_I0_SCALE_OFFSET, 0x01008218)
TMC4671.writeRegister(TMC4671.registers.ADC_I1_SCALE_OFFSET, 0x0100820A)

# ABN encoder settings
TMC4671.writeRegister(TMC4671.registers.ABN_DECODER_MODE, 0x00001000)
TMC4671.writeRegister(TMC4671.registers.ABN_DECODER_PPR, encoderResolution)
TMC4671.writeRegister(TMC4671.registers.ABN_DECODER_COUNT, 0x0)
TMC4671.writeRegister(TMC4671.registers.ABN_DECODER_PHI_E_PHI_M_OFFSET, 0x0)

# Open loop settings
TMC4671.writeRegister(TMC4671.registers.OPENLOOP_MODE, 0x00000000)
TMC4671.writeRegister(TMC4671.registers.OPENLOOP_ACCELERATION, 0x0000003C)

# Limits
TMC4671.writeRegister(TMC4671.registers.PID_TORQUE_FLUX_LIMITS, 1000)

# PI settings
TMC4671.writeRegister(TMC4671.registers.PID_TORQUE_P_TORQUE_I, 0x01000100)
TMC4671.writeRegister(TMC4671.registers.PID_FLUX_P_FLUX_I, 0x01000100)

# ===== 2) estimate the encoder offset =====

# Init encoder (mode 0): put a voltage on the motor and wait 1 second for alignment
TMC4671.writeRegister(TMC4671.registers.MODE_RAMP_MODE_MOTION, 0x00000008)
TMC4671.writeRegister(TMC4671.registers.ABN_DECODER_PHI_E_PHI_M_OFFSET, 0x00000000)
TMC4671.writeRegister(TMC4671.registers.PHI_E_SELECTION, TMC4671.registers.PHI_E_EXTERNAL)
TMC4671.writeRegister(TMC4671.registers.PHI_E_EXT, 0x00000000)
TMC4671.writeRegister(TMC4671.registers.UQ_UD_EXT, 2000)
time.sleep(1)

# clear abn_decoder_count so the N-channel capture is relative to the aligned rotor
TMC4671.writeRegister(TMC4671.registers.ABN_DECODER_COUNT, 0x00000000)
print("abn_decoder_count:" + str(TMC4671.readRegister(TMC4671.registers.ABN_DECODER_COUNT)))

# Switch to open loop velocity mode
TMC4671.writeRegister(TMC4671.registers.PHI_E_SELECTION, TMC4671.registers.PHI_E_OPEN_LOOP)
TMC4671.writeRegister(TMC4671.registers.OPENLOOP_VELOCITY_TARGET, 60)

startTime = time.time()
while True:
    print("dec: " + str(TMC4671.readRegister(TMC4671.registers.ABN_DECODER_COUNT)) + " dec_n: " + str(TMC4671.readRegister(TMC4671.registers.ABN_DECODER_COUNT_N)))
    # stop after 3 seconds
    if (time.time()-startTime) > 3:
        break

# read encoder offset at N channel; reduce it to one electrical period
decoderCountN = TMC4671.readRegister(TMC4671.registers.ABN_DECODER_COUNT_N)
decoderCountN_offset = decoderCountN % (encoderResolution / polePairs)
print("abn_decoder_count_n:" + str(decoderCountN))
print("=> estimated encoder offset: " + str(decoderCountN_offset))

# ===== 3) use the estimated offset =====
TMC4671.writeRegister(TMC4671.registers.ABN_DECODER_PHI_E_PHI_M_OFFSET, int(decoderCountN_offset))

# ===== 4) go to encoder mode =====
TMC4671.writeRegister(TMC4671.registers.PHI_E_SELECTION, TMC4671.registers.PHI_E_ABN)
TMC4671.writeRegister(TMC4671.registers.VELOCITY_SELECTION, TMC4671.registers.VELOCITY_PHI_M_ABN)

# Switch to torque mode
TMC4671.writeRegister(TMC4671.registers.MODE_RAMP_MODE_MOTION, TMC4671.registers.MOTION_MODE_TORQUE)

# ===== 5) make a testdrive =====
maxVelocity = 0
minVelocity = 0

print("rotate right...")
TMC4671.writeRegister(TMC4671.registers.PID_TORQUE_FLUX_TARGET, 0x03E80000)
startTime = time.time()
while True:
    velocity = TMC4671.readRegister(TMC4671.registers.PID_VELOCITY_ACTUAL, signed=True)
    print("velocity: " + str(velocity))
    if velocity > maxVelocity:
        maxVelocity = velocity
    # stop after 2 seconds
    if (time.time()-startTime) > 2:
        break

print("rotate left...")
# 0xFC180000 is the negative torque target (two's complement upper word)
TMC4671.writeRegister(TMC4671.registers.PID_TORQUE_FLUX_TARGET, int(0xFC180000))
startTime = time.time()
while True:
    velocity = TMC4671.readRegister(TMC4671.registers.PID_VELOCITY_ACTUAL, signed=True)
    print("velocity: " + str(velocity))
    if velocity < minVelocity:
        minVelocity = velocity
    # stop after 2 seconds
    if (time.time()-startTime) > 2:
        break

print("stop motor")
TMC4671.writeRegister(TMC4671.registers.PID_TORQUE_FLUX_TARGET, 0)

# ===== 6) short summary =====
print(" === summary === ")
print("abn_decoder_count_n:" + str(decoderCountN))
print("estimated encoder offset: " + str(decoderCountN_offset))
print("maxVelocity:" + str(maxVelocity))
print("minVelocity:" + str(minVelocity))

myInterface.close()
| 35.416667
| 164
| 0.771597
|
794c883318d96411c1d7c5991eaebafa3551e364
| 448
|
py
|
Python
|
cfgread/cfg03.py
|
MarkDuenas/mycode
|
99f5a83ee8448b565a8d53e1c62a12bdfd2cc6ad
|
[
"MIT"
] | null | null | null |
cfgread/cfg03.py
|
MarkDuenas/mycode
|
99f5a83ee8448b565a8d53e1c62a12bdfd2cc6ad
|
[
"MIT"
] | null | null | null |
cfgread/cfg03.py
|
MarkDuenas/mycode
|
99f5a83ee8448b565a8d53e1c62a12bdfd2cc6ad
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""Read a configuration file line by line and report how many lines it has."""

target = input("Enter file to read: \n")

# The context manager closes the file automatically after readlines().
with open(target, "r") as handle:
    # One list entry per line; each entry keeps its trailing "\n".
    lines = handle.readlines()

print(lines)
print(f"There are {len(lines)} lines in this file")
| 28
| 57
| 0.703125
|
794c8860b3c5882eb1510bc1b029f1df84aadfdc
| 1,617
|
py
|
Python
|
xls_to_cs.py
|
wryl/ET_xls_to_cs
|
18b0b5a22c4e5aca238431b57eb37c5366320d4c
|
[
"MIT"
] | null | null | null |
xls_to_cs.py
|
wryl/ET_xls_to_cs
|
18b0b5a22c4e5aca238431b57eb37c5366320d4c
|
[
"MIT"
] | null | null | null |
xls_to_cs.py
|
wryl/ET_xls_to_cs
|
18b0b5a22c4e5aca238431b57eb37c5366320d4c
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
# coding=utf-8
import codecs
import copy
import os
import traceback
from itertools import groupby
import time,datetime
import sys
from jinja2 import Environment, FileSystemLoader
import xlrd
def get_action_fullname(onerow, offset=0):
    """Build the full action name for one sheet row.

    :param onerow: one row of sheet data
    :param offset: 0 for the forward (request) direction, 1 for reverse (response)
    """
    if onerow[4]:
        # e.g. "C2S_" from the first letters of the two endpoint columns
        header = "%s2%s_" % (onerow[4 + offset][0], onerow[5 - offset][0])
    else:
        header = ""
    # Only rows that are actors AND carry a reply message get a suffix.
    suffix = ""
    if onerow[6] == 1 and onerow[7] == 1:
        suffix = "Request" if offset == 0 else "Response"
    return header + onerow[3] + suffix
# Jinja2 environment rooted at the local "template" directory.
file_template_path = "template"
env = Environment(loader=FileSystemLoader(file_template_path))
env.filters['get_action_fullname'] = get_action_fullname # custom filter used inside the templates
# Opens the workbook at import time; 'config.xlsx' must exist in the CWD.
excel_file=xlrd.open_workbook('config.xlsx')
def create_files_by_messagetype(excel_sheets, filename):
    """Generate the C# opcode and message files for one worksheet.

    :param excel_sheets: an open xlrd workbook
    :param filename: the sheet name, also used as the output-file prefix
    """
    sheet = excel_sheets.sheet_by_name(filename)
    # Data rows start at index 2 (the first two rows are headers).
    rows = [sheet.row_values(i) for i in range(2, sheet.nrows)]

    opcode_text = env.get_template('Opcode.txt').render(sheet=rows, message_type=filename)
    with codecs.open("gen/%sOpcode.cs" % filename, 'w', "utf-8") as out:
        out.write(opcode_text)

    # generate the message classes
    message_text = env.get_template('Message.template').render(sheet=rows)
    with codecs.open("gen/%sMessage.cs" % filename, 'w', "utf-8") as out:
        out.write(message_text)

    # NOTE(review): this loop only re-fetches the template and never renders
    # or writes anything; preserved as-is for behavior, but it looks
    # unfinished -- confirm intent.
    for row in rows:
        if row[2]:
            env.get_template('Message.template')
# Hotfix-related: regenerate the generated files for the "Hotfix" sheet
create_files_by_messagetype(excel_file,"Hotfix")
| 28.368421
| 89
| 0.709957
|
794c886d9799bf4d1e386f5e6c4d06ae24800764
| 1,911
|
py
|
Python
|
dynamo/tools/dynamo_bk.py
|
davisidarta/dynamo-release
|
0dbd769f52ea07f3cdaa8fb31022ceb89938c382
|
[
"BSD-3-Clause"
] | null | null | null |
dynamo/tools/dynamo_bk.py
|
davisidarta/dynamo-release
|
0dbd769f52ea07f3cdaa8fb31022ceb89938c382
|
[
"BSD-3-Clause"
] | null | null | null |
dynamo/tools/dynamo_bk.py
|
davisidarta/dynamo-release
|
0dbd769f52ea07f3cdaa8fb31022ceb89938c382
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from scipy.optimize import least_squares
from sklearn.decomposition import TruncatedSVD
from sklearn.manifold import TSNE
from sklearn.neighbors import NearestNeighbors
from scipy.stats import norm as normal
def sol_u(t, u0, alpha, beta):
    """Analytic solution of du/dt = alpha - beta*u with u(0) = u0."""
    decay = np.exp(-beta * t)
    return u0 * decay + alpha / beta * (1 - decay)
def sol_s(t, s0, u0, alpha, beta, gamma):
    """Analytic solution for spliced RNA s(t) with ds/dt = beta*u - gamma*s,
    where u follows sol_u(t, u0, alpha, beta); s(0) = s0."""
    eg = np.exp(-gamma * t)
    eb = np.exp(-beta * t)
    decay_term = s0 * eg
    production_term = alpha / gamma * (1 - eg)
    splicing_term = (alpha + u0 * beta) / (gamma - beta) * (eg - eb)
    return decay_term + production_term + splicing_term
def fit_gamma_labelling(t, l, mode=None, lbound=None):
    """Fit the degradation rate gamma by linear regression of log(l) on time.

    :param t: time points (array)
    :param l: labelled abundances at each time point (array)
    :param mode: "fast" skips computing the intercept
    :param lbound: optional lower clamp applied before taking the log
    :return: (gamma, intercept) -- intercept is None in "fast" mode
    """
    n = l.size
    tau = t - np.min(t)
    tm = np.mean(tau)

    # prepare y
    if lbound is not None:
        # BUG FIX: clamp on a copy; the original wrote through to the
        # caller's array (l[l < lbound] = lbound mutated the input).
        l = np.where(l < lbound, lbound, l)
    y = np.log(l)
    ym = np.mean(y)

    # slope of log(l) vs tau; the decay rate is its negation
    var_t = np.mean(tau ** 2) - tm ** 2
    cov = np.sum(y.dot(tau)) / n - ym * tm
    k = cov / var_t

    # intercept (skipped in fast mode)
    b = np.exp(ym - k * tm) if mode != "fast" else None
    return -k, b
def fit_alpha_labelling(t, u, gamma, mode=None):
    """Fit the synthesis rate alpha given gamma by regressing u*exp(gamma*tau)
    on exp(gamma*tau) - 1; the slope equals alpha/gamma."""
    n = u.size
    tau = t - np.min(t)
    expt = np.exp(gamma * tau)

    # regressor and response of the linearized model
    x = expt - 1
    y = u * expt
    xm, ym = np.mean(x), np.mean(y)

    # least-squares slope
    var_x = np.mean(x ** 2) - xm ** 2
    cov = np.sum(y.dot(x)) / n - ym * xm
    slope = cov / var_x

    # intercept (skipped in fast mode)
    intercept = ym - slope * xm if mode != "fast" else None
    return slope * gamma, intercept
def fit_gamma_splicing(t, s, beta, u0, bounds=(0, np.inf)):
    """Fit the degradation rate gamma by least squares against sol_s.

    :param t: time points (array)
    :param s: spliced abundances (array)
    :param beta: known splicing rate
    :param u0: initial unspliced abundance
    :param bounds: bounds forwarded to scipy.optimize.least_squares
    :return: (fitted gamma as returned by least_squares, initial spliced abundance s0)
    """
    tau = t - np.min(t)
    s0 = np.mean(s[tau == 0])
    g0 = beta * u0 / s0  # steady-state initial guess

    # BUG FIX: sol_s's signature is (t, s0, u0, alpha, beta, gamma); the
    # original call passed u0 and s0 in swapped positions, fitting against
    # the wrong trajectory.
    f_lsq = lambda g: sol_s(tau, s0, u0, 0, beta, g) - s
    ret = least_squares(f_lsq, g0, bounds=bounds)
    return ret.x, s0
def fit_gamma(u, s):
    """Steady-state estimate of gamma as cov(u, s) / var(s)."""
    n = len(u)
    cov_us = u.dot(s) / n - np.mean(u) * np.mean(s)
    var_s = s.dot(s) / len(s) - np.mean(s) ** 2
    return cov_us / var_s
| 23.024096
| 77
| 0.56044
|
794c88a2e03cf983adddc8b2f0ac53fc4027da4f
| 394
|
py
|
Python
|
python/237_Delete_Node_in_a_Linked_List.py
|
dvlpsh/leetcode-1
|
f965328af72113ac8a5a9d6624868c1502be937b
|
[
"MIT"
] | 4,416
|
2016-03-30T15:02:26.000Z
|
2022-03-31T16:31:03.000Z
|
python/237_Delete_Node_in_a_Linked_List.py
|
YinpuLi/leetcode-6
|
1371de2631d745efba39de41b51c3424e35da434
|
[
"MIT"
] | 20
|
2018-11-17T13:46:25.000Z
|
2022-03-13T05:37:06.000Z
|
python/237_Delete_Node_in_a_Linked_List.py
|
YinpuLi/leetcode-6
|
1371de2631d745efba39de41b51c3424e35da434
|
[
"MIT"
] | 1,374
|
2017-05-26T15:44:30.000Z
|
2022-03-30T19:21:02.000Z
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def deleteNode(self, node):
        """
        :type node: ListNode
        :rtype: void Do not return anything, modify node in-place instead.

        The predecessor is unavailable, so copy the successor's value into
        this node and splice the successor out of the list.
        """
        successor = node.next
        node.val = successor.val
        node.next = successor.next
| 28.142857
| 74
| 0.583756
|
794c88eed627452f97f98c21e424cdd3c4ac8fb7
| 1,422
|
py
|
Python
|
data_structures/graphs/mother_vertex.py
|
ruler30cm/python-ds
|
f84605c5b746ea1d46de3d00b86f5fba399445c7
|
[
"MIT"
] | 1,723
|
2019-07-30T07:06:22.000Z
|
2022-03-31T15:22:22.000Z
|
data_structures/graphs/mother_vertex.py
|
ruler30cm/python-ds
|
f84605c5b746ea1d46de3d00b86f5fba399445c7
|
[
"MIT"
] | 213
|
2019-10-06T08:07:47.000Z
|
2021-10-04T15:38:36.000Z
|
data_structures/graphs/mother_vertex.py
|
ruler30cm/python-ds
|
f84605c5b746ea1d46de3d00b86f5fba399445c7
|
[
"MIT"
] | 628
|
2019-10-06T10:26:25.000Z
|
2022-03-31T01:41:00.000Z
|
"""
A mother vertex is a vertex such that all other vertices
can be reached by a path from this vertex
Reference - https://www.geeksforgeeks.org/find-a-mother-vertex-in-a-graph/
Time complexity - 2 * O(V + E) = O(V + E)
"""
from collections import defaultdict
class Graph:
    """Directed graph over vertices 0..V-1 stored as adjacency lists."""

    def __init__(self, vertices):
        self.V = vertices
        self.graph = defaultdict(list)

    def add_edge(self, v, w):
        """Add a directed edge v -> w."""
        self.graph[v].append(w)

    def dfs_util(self, v, visited):
        """Depth-first search from v, marking every reachable vertex."""
        visited[v] = True
        for neighbour in self.graph[v]:
            if not visited[neighbour]:
                self.dfs_util(neighbour, visited)

    def find_mother(self):
        """Return a mother vertex (one that reaches all others), or -1.

        The last vertex at which a fresh DFS has to be started is the only
        possible candidate; a second DFS from it verifies reachability.
        """
        visited = [False] * self.V
        candidate = 0
        for v in range(self.V):
            if not visited[v]:
                self.dfs_util(v, visited)
                candidate = v

        # Reset and check that every vertex is reachable from the candidate.
        visited = [False] * self.V
        self.dfs_util(candidate, visited)
        return candidate if all(visited) else -1
# Build the example graph from the article and report its mother vertex.
g = Graph(7)
for src, dst in [(0, 1), (0, 2), (1, 3), (4, 1), (6, 4), (5, 6), (5, 2), (6, 0)]:
    g.add_edge(src, dst)
print("Mother vertex is - ", g.find_mother())
| 21.545455
| 74
| 0.563994
|
794c89ef7a77e2fd063586136cf0aaaf73c3d385
| 7,240
|
py
|
Python
|
CNN_Model/run_3d_cnn.py
|
jessecha/OPCAS
|
2b51543b4ad1ee37dba2e45a0c7d0b872309d418
|
[
"MIT"
] | 1
|
2021-02-28T05:58:50.000Z
|
2021-02-28T05:58:50.000Z
|
CNN_Model/run_3d_cnn.py
|
jessecha/OPCAS
|
2b51543b4ad1ee37dba2e45a0c7d0b872309d418
|
[
"MIT"
] | null | null | null |
CNN_Model/run_3d_cnn.py
|
jessecha/OPCAS
|
2b51543b4ad1ee37dba2e45a0c7d0b872309d418
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
import cv2
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto(allow_soft_placement=True, device_count = {'CPU' : 1, 'GPU' : 1})
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))
import pickle
import os
import cv2
import numpy as np
import time
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from keras.models import Sequential
from keras.layers import Flatten, Activation, Dense, Dropout, MaxPooling3D, Conv3D
from keras import optimizers
from keras import regularizers
from keras.layers.normalization import BatchNormalization
from keras.layers.noise import AlphaDropout
from keras import callbacks
from sklearn.externals import joblib
import matplotlib.pyplot as plt
from data_utils.data_processor import load_dataset
from model.models import build_3d_cnn
from model_test_utils.metrics import mean_absolute_relative_error
from model_test_utils.metrics import coefficient_of_determination
from keras.layers.advanced_activations import ELU
os.environ["CUDA_VISIBLE_DEVICES"] = ""
import tensorflow as tf
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
def main(*args, **kwargs):
    """Train and/or test the stacked-frame 3D CNN driving model.

    Expects in kwargs: mode ('train'/'test'), n_stacked, n_jump, width,
    height, depth, img_path, out_path, epochs, batch_size (see the argparse
    definitions at the bottom of this file).
    """
    # n_jump == 0 means "step by one full stack" (non-overlapping windows).
    if kwargs['n_jump'] == 0:
        kwargs['n_jump'] = kwargs['n_stacked']
    # Weights file name encodes the hyperparameters so runs don't collide.
    saved_file_name = './keras_3dcnn_{}stacked_{}jumps_{}depth.hdf5'.format(
        kwargs['n_stacked'], kwargs['n_jump'], kwargs['depth'])
    # NOTE(review): data_path is computed but never used below.
    data_path = os.path.join(
        os.path.dirname(os.path.abspath(os.path.dirname(__file__))),
        'dataset'
    )
    img_path = os.path.join(kwargs['img_path'])
    out_path = os.path.join(kwargs['out_path'])
    n_stacked = kwargs['n_stacked']
    # Split: 4% validation, 4% test, remainder for training.
    train_x, val_x, test_x, train_y, val_y, test_y = load_dataset(
        n_stacked, img_path, out_path,
        h=kwargs['height'], w=kwargs['width'], d=kwargs['depth'],
        val_size=0.04, test_size=0.04,
        n_jump=kwargs['n_jump']
    )
    print("number of train images:", train_x.shape)
    print("number of validation images:", val_x.shape)
    print("number of test images:", test_x.shape)
    print("number of train output sets:", train_y.shape)
    print("number of validation output sets:", val_y.shape)
    print("number of test output sets:", test_y.shape)
    with tf.device('/device:GPU:0'):
        model = build_3d_cnn(
            kwargs['width'], kwargs['height'],
            kwargs['depth'], kwargs['n_stacked']
        )
    # input()
    if kwargs['mode'] == 'train':
        print("press enter")
        # Stop when val_loss hasn't improved for 30 epochs.
        stop_callbacks = callbacks.EarlyStopping(
            monitor='val_loss', patience=30, verbose=0, mode='min', min_delta=0
        )
        # Keep only the best (lowest val_loss) weights on disk.
        checkpoint = callbacks.ModelCheckpoint(
            saved_file_name, monitor='val_loss',
            verbose=1, save_best_only=True, mode='min'
        )
        history = model.fit(
            train_x, train_y,
            batch_size=kwargs['batch_size'], epochs=kwargs['epochs'],
            callbacks=[stop_callbacks,checkpoint],
            validation_data=(val_x, val_y), shuffle=True
        )
    # test always: reload the best checkpointed weights before predicting.
    print("Start test....")
    model.load_weights(saved_file_name)
    model_y_val = model.predict(val_x, batch_size=None, verbose=0)
    model_y = model.predict(test_x, batch_size=None, verbose=0)
    # train result: plot accuracy/loss curves from the fit history.
    if kwargs['mode'] == 'train':
        print(history.history.keys())
        # summarize history for accuracy
        plt.plot(history.history['acc'])
        plt.plot(history.history['val_acc'])
        plt.title('model accuracy')
        plt.ylabel('accuracy')
        plt.xlabel('epoch')
        plt.legend(['train', 'val'], loc='upper left')
        plt.show()
        # summarize history for loss
        plt.plot(history.history['loss'])
        plt.plot(history.history['val_loss'])
        plt.title('model loss')
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'val'], loc='upper left')
        plt.show()
    # val result: per-output MARE and R^2, then dump predictions to CSV.
    attrs = ['steering', 'throttle']
    for i in range(2):
        mare = mean_absolute_relative_error(val_y[:,i], model_y_val[:,i])
        print(attrs[i] +' mare: ' + str(mare))
        R2_val = coefficient_of_determination(val_y[:,i], model_y_val[:,i])
        print(attrs[i] +'R^2: ' + str(R2_val))
    csvdata = pd.DataFrame(val_y, columns=attrs)
    csvdata['model_steering'] = model_y_val[:,0]
    csvdata['model_throttle'] = model_y_val[:,1]
    result_file_name = './result_val_3dcnn_{}stacked_{}jumps_{}depth.csv'.format(
        kwargs['n_stacked'], kwargs['n_jump'], kwargs['depth'])
    csvdata.to_csv(result_file_name)
    print('val result saved')
    # test result: same metrics and CSV dump for the held-out test split.
    attrs = ['steering', 'throttle']
    for i in range(2):
        mare = mean_absolute_relative_error(test_y[:,i], model_y[:,i])
        print(attrs[i] +' mare: ' + str(mare))
        R2_val = coefficient_of_determination(test_y[:,i], model_y[:,i])
        print(attrs[i] +'R^2: ' + str(R2_val))
        print("maximum test accuracy was " + str(max(test_y[:,i])))
    csvdata = pd.DataFrame(test_y, columns=attrs)
    csvdata['model_steering'] = model_y[:,0]
    csvdata['model_throttle'] = model_y[:,1]
    result_file_name = './result_3dcnn_{}stacked_{}jumps_{}depth.csv'.format(
        kwargs['n_stacked'], kwargs['n_jump'], kwargs['depth'])
    csvdata.to_csv(result_file_name)
    print('test result saved')
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "mode", help="train/test",
        type=str, choices=["train", "test"]
    )
    # Integer hyperparameters describing the stacked-frame input volume.
    for flag, text, default in [
        ("--n_stacked", "# of stacked frame for time axis", 2),
        ("--n_jump", "time interval to get input, 0 for n_jump=n_stacked", 1),
        ("--width", "width of input images", 104),
        ("--height", "height of input images", 104),
        ("--depth", "the number of channels of input images", 3),
    ]:
        parser.add_argument(flag, help=text, type=int, default=default)
    # Dataset locations.
    parser.add_argument(
        "--img_path", help="image directory",
        type=str, default='/home/jesse/Desktop/imagefiles/image_set'
    )
    parser.add_argument(
        "--out_path", help="target csv filename",
        type=str, default='/home/jesse/Desktop/training_dataset.csv'
    )
    # Training-loop settings.
    for flag, text, default in [
        ("--epochs", "total number of training epochs", 50000),
        ("--batch_size", "batch_size", 32),
    ]:
        parser.add_argument(flag, help=text, type=int, default=default)
    args = parser.parse_args()
    main(**vars(args))
| 37.512953
| 90
| 0.62942
|
794c8a90d5288df252b14f1633cf278a9ff6c27f
| 7,167
|
py
|
Python
|
models/sentiment_train_model.py
|
PierreLessard/Public-Morale-Over-Covid
|
14d8d7616a1b17dc2e6b5f76ccad09f7630da28a
|
[
"Apache-2.0"
] | 1
|
2021-12-11T22:36:30.000Z
|
2021-12-11T22:36:30.000Z
|
models/sentiment_train_model.py
|
PierreLessard/Public-Morale-Over-Covid
|
14d8d7616a1b17dc2e6b5f76ccad09f7630da28a
|
[
"Apache-2.0"
] | null | null | null |
models/sentiment_train_model.py
|
PierreLessard/Public-Morale-Over-Covid
|
14d8d7616a1b17dc2e6b5f76ccad09f7630da28a
|
[
"Apache-2.0"
] | 3
|
2021-12-11T07:31:31.000Z
|
2021-12-12T05:10:25.000Z
|
"""Structure for model trainer loosely taken from:
https://realpython.com/sentiment-analysis-python mainly for guide on spacy.
dataset used is from https://ai.stanford.edu/~amaas/data/sentiment/
using version 2.3.5 of spacy as version 3 includes api issues when trying to use en cor web sm
"""
import os
from random import shuffle
import spacy
import pickle
from spacy.util import minibatch, compounding
from spacy.tokenizer import Tokenizer
from spacy.pipeline import Morphologizer
import csv
def format_training_data(direc: str = "data/training/aclImdb/train") -> None:
    """
    Load the raw aclImdb training reviews from *direc* and pickle them.

    Only run after downloading and extracting the dataset tar.gz; the raw
    training data is not included in the repo (see the link in the module
    docstring).
    """
    reviews = []
    # One sub-folder per sentiment class.
    for cat in ('pos', 'neg'):
        folder = f'{direc}/{cat}'
        # Each review is stored in its own .txt file.
        for name in os.listdir(folder):
            if not name.endswith('.txt'):
                continue
            with open(f'{folder}/{name}', encoding="Latin-1") as f:
                text = f.read().replace('<br />', r'\n\n').strip()
            label = {'cats': {'pos': cat == 'pos', 'neg': cat == 'neg'}}
            reviews.append((text, label))
    with open('data/training/movie_reviews_data.pkl', 'wb') as f:
        pickle.dump(reviews, f)
def shuffle_training_data(data: list, split: int = .8) -> tuple[list]:
    """
    Shuffle *data* in place and split it at the *split* fraction; the
    default gives the recommended 4:1 split.

    NOTE(review): the SMALLER chunk (last 1-split fraction) is returned
    first and the larger chunk second -- confirm callers expect that order.
    """
    shuffle(data)
    cut = int(len(data) * split)
    return data[cut:], data[:cut]
def grab_training_data(shuffle: bool = False, direc: str = 'data/training/movie_reviews_data.pkl') -> tuple[list]:
    """
    Load the pickled reviews from *direc*; if *shuffle* is true, shuffle
    and split them via shuffle_training_data first.
    """
    with open(direc, 'rb') as f:
        reviews = pickle.load(f)
    if shuffle:
        return shuffle_training_data(reviews)
    return tuple(reviews)
def save_model(nlp, optimizer, directory: str = 'models/sentiment/model_artifacts') -> None:
    """Persist *nlp* to *directory* using the optimizer's averaged weights."""
    with nlp.use_params(optimizer.averages):
        nlp.to_disk(directory)
    print(f"Model Saved to {directory}")
def write_data_to_csv(data: dict, loss: dict, count: int, csv_direc: str = 'models/sentiment/evaluations.csv') -> None:
    """Append one evaluation row to the CSV; model 0 rewrites the file with a header first."""
    row = [count, loss['textcat'], data['precision'], data['recall'], data['f-score']]
    first_model = not count
    # Truncate and emit the header on the first model, append afterwards.
    mode = 'w' if first_model else 'a'
    with open(csv_direc, mode, newline='') as csvfile:
        writer = csv.writer(csvfile)
        if first_model:
            writer.writerow(["MODEL NUMBER", "LOSS", "PRECISION", "RECALL", "F-SCORE"])
        writer.writerow(row)
def train_model(training_data: list[tuple], test_data: list[tuple], count: int) -> None:
    """
    Trains model given training data. Code structure taken from https://realpython.com/sentiment-analysis-python
    Changes were made due to some efficiency issues, unclear code, and outdated uses of APIs and libraries

    :param training_data: list of (text, {'cats': {'pos': bool, 'neg': bool}}) pairs
    :param test_data: held-out pairs in the same shape, pickled and used for evaluation
    :param count: number of training epochs; one model is evaluated/saved per epoch
    """
    results_txt = []
    nlp = spacy.load("en_core_web_sm") # for en_core_web_sm legacy issue, pip3 install:
    # https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-2.2.0/en_core_web_sm-2.2.0.tar.gz
    # morphologizer documentation: https://spacy.io/api/morphologizer#add_label
    # Add a simple-CNN text categorizer pipe with pos/neg labels if missing.
    if "textcat" not in nlp.pipe_names:
        nlp.add_pipe(nlp.create_pipe("textcat", config={"architecture": "simple_cnn"}), last=True)
    textcat = nlp.get_pipe("textcat")
    textcat.add_label("pos") and textcat.add_label("neg")
    # Persist the test split so later evaluations use the same data.
    with open('models/sentiment/models/test_data.pkl', 'wb') as f:
        pickle.dump(test_data, f)
    # code to exclude useless pipes from training
    with nlp.disable_pipes([pipe for pipe in nlp.pipe_names if pipe != "textcat"]):
        optimizer = nlp.begin_training()
        # Batch size grows geometrically from 4 toward 32 (spacy recipe).
        batch_sizes = compounding(4.0, 32.0, 1.001)
        for i in range(count):
            shuffle(training_data)
            batches, loss = minibatch(training_data, size=batch_sizes), {}
            for batch in batches:
                text, labels = zip(*batch) # batch is in the form [(text,label)] so we zip* and get a list for each
                nlp.update(text, labels, drop=.2, sgd=optimizer, losses=loss)
            # Evaluate with the averaged weights, as used when saving.
            with textcat.model.use_params(optimizer.averages):
                results = evaluate_model(nlp.tokenizer, textcat, test_data)
            txt_wrp = f'Model #{i+1}/{count}: Precision: {results["precision"]}, Recall: {results["recall"]}, F-Score: {results["f-score"]}, loss:{loss["textcat"]}.'
            print(txt_wrp, end=' ')
            results_txt.append(txt_wrp)
            write_data_to_csv(results, loss, i)
            # uncomment to save model "BE CAREFUL MAY DESTROY PREVIOUS MODEL"
            save_model(nlp, optimizer, f'models/sentiment/models/model{i+1}')
    # Dump the per-epoch summaries once training completes.
    with open('models/sentiment/results.txt', 'w') as f:
        for result in results_txt:
            f.write(result + '\n')
def evaluate_model(tokenizer: Tokenizer, textcat: Morphologizer, test_data: list) -> dict:
    """
    Score the text categorizer on *test_data*; return precision, recall and
    F-score keyed as "precision"/"recall"/"f-score".
    """
    tp = tn = 0
    fp = fn = 1e-8  # start near zero (not at it) to avoid division by zero
    tokens, labels = zip(*((tokenizer(text), meta['cats']) for text, meta in test_data))
    scores = (doc.cats['pos'] for doc in textcat.pipe(tokens))
    for score, truth in zip(scores, labels):
        predicted_pos = score >= 0.5
        if predicted_pos and truth["pos"]:
            tp += 1
        elif predicted_pos and truth["neg"]:
            fp += 1
        elif not predicted_pos and truth["neg"]:
            tn += 1
        elif not predicted_pos and truth["pos"]:
            fn += 1
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    f_score = 2 * (precision * recall) / (precision + recall) if precision + recall else 0
    return {"precision": precision, "recall": recall, "f-score": f_score}
if __name__ == "__main__":
    # Uncomment to retrain models
    # DISCLAIMER: takes hours and overwrites other files
    # data = grab_training_data(True)
    # train_model(data[0], data[1], 25)

    # python-ta
    # NOTE(review): python_ta is imported unconditionally even though the
    # check below is commented out, so running this file as a script still
    # requires python_ta to be installed.
    import python_ta
    # python_ta.check_all(config={
    #     'max-line-length': 120,  # 100 was too short for nested code sections
    #     'disable': ['R1705', 'C0200']
    # })
| 42.660714
| 165
| 0.646993
|
794c8ab042c5f06e5b6f490a3b743929cc218205
| 15,515
|
py
|
Python
|
contrib/dokku-installer.py
|
Lieunoir/dokku
|
aa13be80a39e764c159fab577e377b362c5f6588
|
[
"MIT"
] | null | null | null |
contrib/dokku-installer.py
|
Lieunoir/dokku
|
aa13be80a39e764c159fab577e377b362c5f6588
|
[
"MIT"
] | null | null | null |
contrib/dokku-installer.py
|
Lieunoir/dokku
|
aa13be80a39e764c159fab577e377b362c5f6588
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import cgi
import json
import os
import re
import shutil
try:
import SimpleHTTPServer
import SocketServer
except ImportError:
import http.server as SimpleHTTPServer
import socketserver as SocketServer
import subprocess
import sys
import threading
VERSION = 'v0.23.7'
def bytes_to_string(b):
    """Decode *b* if it is bytes (stdout encoding, utf-8 fallback), then strip whitespace."""
    if type(b) == bytes:
        # sys.stdout.encoding is None when stdout is not a terminal.
        b = b.decode(sys.stdout.encoding or 'utf-8')
    return b.strip()
def string_to_bytes(s):
    """Encode *s* to bytes if it is str (stdout encoding, utf-8 fallback); bytes pass through."""
    if type(s) == str:
        # sys.stdout.encoding is None when stdout is not a terminal.
        s = s.encode(sys.stdout.encoding or 'utf-8')
    return s
# --- Environment detection performed once at import time ---

# Public hostname: use $HOSTNAME when it resolves in DNS, otherwise the
# machine's public IP from icanhazip.com.
hostname = ''
try:
    command = "bash -c '[[ $(dig +short $HOSTNAME) ]] && echo $HOSTNAME || wget -q -O - icanhazip.com'"
    hostname = bytes_to_string(subprocess.check_output(command, shell=True))
except subprocess.CalledProcessError:
    pass

# Location of the authorized_keys file to pre-populate the admin key list.
# NOTE(review): the KEY_FILE env value read here is always overwritten by
# the if/elif/else below (the else branch fires even when the env var is
# set) -- the env override appears to be dead code; confirm intent.
key_file = os.getenv('KEY_FILE', None)
if os.path.isfile('/home/ec2-user/.ssh/authorized_keys'):
    key_file = '/home/ec2-user/.ssh/authorized_keys'
elif os.path.isfile('/home/ubuntu/.ssh/authorized_keys'):
    key_file = '/home/ubuntu/.ssh/authorized_keys'
else:
    key_file = '/root/.ssh/authorized_keys'

# Pre-fill the installer form with the existing authorized keys, if any.
admin_keys = []
if os.path.isfile(key_file):
    try:
        command = "cat {0}".format(key_file)
        admin_keys = bytes_to_string(subprocess.check_output(command, shell=True)).strip().split("\n")
    except subprocess.CalledProcessError:
        pass

# CSS display value for the ufw warning box: hidden when ufw is inactive
# or not installed.
ufw_display = 'block'
try:
    command = "sudo ufw status"
    ufw_output = bytes_to_string(subprocess.check_output(command, shell=True).strip())
    if "inactive" in ufw_output:
        ufw_display = 'none'
except subprocess.CalledProcessError:
    ufw_display = 'none'

# Use openresty's paths when it is installed; plain nginx otherwise.
nginx_dir = '/etc/nginx'
nginx_init = '/etc/init.d/nginx'
try:
    command = "test -x /usr/bin/openresty"
    subprocess.check_output(command, shell=True)
    nginx_dir = '/usr/local/openresty/nginx/conf'
    nginx_init = '/etc/init.d/openresty'
except subprocess.CalledProcessError:
    pass
def check_boot():
    """When invoked with the 'onboot' argument, install upstart/systemd units
    and an nginx fragment so the installer runs on boot, then exit.

    No-op (returns immediately) unless 'onboot' is in sys.argv.
    """
    if 'onboot' not in sys.argv:
        return
    init_dir = os.getenv('INIT_DIR', '/etc/init')
    systemd_dir = os.getenv('SYSTEMD_DIR', '/etc/systemd/system')
    nginx_conf_dir = os.getenv('NGINX_CONF_DIR', '{0}/conf.d'.format(nginx_dir))
    # Upstart job: start the installer in selfdestruct mode on boot.
    if os.path.exists(init_dir):
        with open('{0}/dokku-installer.conf'.format(init_dir), 'w') as f:
            f.write("start on runlevel [2345]\n")
            f.write("exec {0} selfdestruct\n".format(os.path.abspath(__file__)))
    # systemd unit: same behavior for systemd-based systems.
    if os.path.exists(systemd_dir):
        with open('{0}/dokku-installer.service'.format(systemd_dir), 'w') as f:
            f.write("[Unit]\n")
            f.write("Description=Dokku web-installer\n")
            f.write("\n")
            f.write("[Service]\n")
            f.write("ExecStart={0} selfdestruct\n".format(os.path.abspath(__file__)))
            f.write("\n")
            f.write("[Install]\n")
            f.write("WantedBy=multi-user.target\n")
            f.write("WantedBy=graphical.target\n")
    # nginx fragment: proxy port 80 to the installer on 127.0.0.1:2000.
    if os.path.exists(nginx_conf_dir):
        with open('{0}/dokku-installer.conf'.format(nginx_conf_dir), 'w') as f:
            f.write("upstream dokku-installer { server 127.0.0.1:2000; }\n")
            f.write("server {\n")
            f.write("  listen      80;\n")
            f.write("  location    / {\n")
            f.write("    proxy_pass  http://dokku-installer;\n")
            f.write("  }\n")
            f.write("}\n")
    # Remove any site configs that would shadow the installer's server block.
    subprocess.call('rm -f {0}/sites-enabled/*'.format(nginx_dir), shell=True)
    sys.exit(0)
class GetHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def write_content(self, content):
try:
self.wfile.write(content)
except TypeError:
self.wfile.write(string_to_bytes(content))
def do_GET(self):
content = PAGE.replace('{VERSION}', VERSION)
content = content.replace('{UFW_DISPLAY}', ufw_display)
content = content.replace('{HOSTNAME}', hostname)
content = content.replace('{AUTHORIZED_KEYS_LOCATION}', key_file)
content = content.replace('{ADMIN_KEYS}', "\n".join(admin_keys))
self.send_response(200)
self.end_headers()
self.write_content(content)
def do_POST(self):
if self.path not in ['/setup', '/setup/']:
return
params = cgi.FieldStorage(fp=self.rfile,
headers=self.headers,
environ={
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': self.headers['Content-Type']})
dokku_root = os.getenv('DOKKU_ROOT', '/home/dokku')
dokku_user = os.getenv('DOKKU_SYSTEM_GROUP', 'dokku')
dokku_group = os.getenv('DOKKU_SYSTEM_USER', 'dokku')
vhost_enable = 'false'
vhost_filename = '{0}/VHOST'.format(dokku_root)
if 'vhost' in params and params['vhost'].value == 'true':
vhost_enable = 'true'
with open(vhost_filename, 'w') as f:
f.write(params['hostname'].value.strip("/"))
shutil.chown(vhost_filename, dokku_user, dokku_group)
else:
try:
os.remove(vhost_filename)
except OSError:
pass
hostname_filename = '{0}/HOSTNAME'.format(dokku_root)
with open(hostname_filename, 'w') as f:
f.write(params['hostname'].value.strip("/"))
shutil.chown(hostname_filename, dokku_user, dokku_group)
for (index, key) in enumerate(params['keys'].value.splitlines(), 1):
user = 'admin'
if self.admin_user_exists() is not None:
user = 'web-admin'
if self.web_admin_user_exists() is not None:
index = int(self.web_admin_user_exists()) + 1
elif self.web_admin_user_exists() is None:
index = 1
elif self.admin_user_exists() is None:
pass
else:
index = int(self.admin_user_exists()) + 1
user = user + str(index)
command = ['sshcommand', 'acl-add', 'dokku', user]
proc = subprocess.Popen(command, stdin=subprocess.PIPE)
try:
proc.stdin.write(key)
except TypeError:
proc.stdin.write(string_to_bytes(key))
proc.stdin.close()
proc.wait()
set_debconf_selection('boolean', 'nginx_enable', 'true')
set_debconf_selection('boolean', 'skip_key_file', 'true')
set_debconf_selection('boolean', 'vhost_enable', vhost_enable)
set_debconf_selection('boolean', 'web_config', 'false')
set_debconf_selection('string', 'hostname', params['hostname'].value)
if 'selfdestruct' in sys.argv:
DeleteInstallerThread()
content = json.dumps({'status': 'ok'})
self.send_response(200)
self.end_headers()
self.write_content(content)
def web_admin_user_exists(self):
    """Return the highest numeric suffix among registered ``web-adminN``
    SSH key names, or ``None`` if no such key exists."""
    # Raw string so \d is a regex token, not a (deprecated) string escape.
    return self.user_exists(r'web-admin(\d+)')
def admin_user_exists(self):
    """Return the highest numeric suffix among registered ``adminN`` SSH
    key names, or ``None`` if no such key exists."""
    # Raw string so \d is a regex token, not a (deprecated) string escape.
    return self.user_exists(r'admin(\d+)')
def user_exists(self, name):
    """Scan `dokku ssh-keys:list` output for key names matching the regex
    ``name``; return the largest captured numeric suffix, or ``None`` if
    no key matches.

    ``name`` must contain one capture group over digits (e.g.
    ``r'admin(\\d+)'``) — ``m.group(1)`` is fed to ``int()`` below.
    """
    command = 'dokku ssh-keys:list'
    pattern = re.compile(r'NAME="' + name + '"')
    proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
    max_num = 0
    exists = False
    # NOTE(review): proc is never wait()ed — a zombie may linger until GC;
    # confirm acceptable for this short-lived installer process.
    for line in proc.stdout:
        m = pattern.search(bytes_to_string(line))
        if m:
            # User of the form `user` or `user#` exists
            exists = True
            max_num = max(max_num, int(m.group(1)))
    if exists:
        return max_num
    else:
        return None
def set_debconf_selection(debconf_type, key, value):
    """Pre-seed the debconf answer ``dokku dokku/<key> <type> <value>`` so a
    later package (re)configuration is non-interactive.

    No-op on non-Debian systems, when /etc/os-release is unreadable, or
    when debconf tooling is unavailable.
    """
    found = False
    try:
        with open('/etc/os-release', 'r') as f:
            for line in f:
                if 'debian' in line:
                    found = True
    except (IOError, OSError):
        # Original crashed here on systems without /etc/os-release;
        # treat "can't identify OS" as "not Debian".
        return
    if not found:
        return

    # Pipe the selection line through echo into debconf-set-selections
    # (kept as a subprocess pipeline for Python 2/3 compatibility).
    ps = subprocess.Popen(['echo', 'dokku dokku/{0} {1} {2}'.format(
        key, debconf_type, value
    )], stdout=subprocess.PIPE)
    try:
        subprocess.check_output(['debconf-set-selections'], stdin=ps.stdout)
    except (subprocess.CalledProcessError, OSError):
        # OSError: debconf-set-selections not installed — best-effort only.
        pass
    ps.wait()
class DeleteInstallerThread(object):
    """Fire-and-forget removal of the installer itself.

    Instantiating starts a daemon thread that deletes the installer's nginx
    config, bounces nginx, and stops/removes the installer service. All
    steps are best-effort: failures are swallowed so self-destruction never
    crashes the response that triggered it.
    """

    def __init__(self, interval=1):
        # `interval` is kept for backward compatibility but unused.
        thread = threading.Thread(target=self.run, args=())
        thread.daemon = True
        thread.start()

    def run(self):
        command = "rm {0}/conf.d/dokku-installer.conf && {1} stop && {1} start".format(nginx_dir, nginx_init)
        try:
            subprocess.call(command, shell=True)
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed; cleanup stays best-effort.
            pass
        command = "rm -f /etc/init/dokku-installer.conf /etc/systemd/system/dokku-installer.service && (stop dokku-installer || systemctl stop dokku-installer.service)"
        try:
            subprocess.call(command, shell=True)
        except Exception:
            pass
def main():
    """Start the single-page installer HTTP server; blocks until killed.

    Port comes from the PORT env var (default 2000); requests are handled
    by the module's GetHandler class.
    """
    check_boot()
    port = int(os.getenv('PORT', 2000))
    httpd = SocketServer.TCPServer(("", port), GetHandler)
    print("Listening on 0.0.0.0:{0}, CTRL+C to stop".format(port))
    httpd.serve_forever()
PAGE = """
<html>
<head>
<meta charset="utf-8" />
<title>Dokku Setup</title>
<link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/css/bootstrap.min.css" integrity="sha384-MCw98/SFnGE8fJT3GXwEOngsV7Zt27NXFoaoApmYm81iuXoPkFOJwJ8ERdknLPMO" crossorigin="anonymous">
<style>
.bd-callout {
padding: 1.25rem;
margin-top: 1.25rem;
margin-bottom: 1.25rem;
border: 1px solid #eee;
border-left-width: .25rem;
border-radius: .25rem;
}
.bd-callout p:last-child {
margin-bottom: 0;
}
.bd-callout-info {
border-left-color: #5bc0de;
}
pre {
font-size: 80%;
margin-bottom: 0;
}
h1 small {
font-size: 50%;
}
h5 {
font-size: 1rem;
}
.container {
width: 640px;
}
.result {
padding-left: 20px;
}
input.form-control, textarea.form-control {
background-color: #fafbfc;
font-size: 14px;
}
input.form-control::placeholder, textarea.form-control::placeholder {
color: #adb2b8
}
</style>
</head>
<body>
<div class="container">
<form id="form" role="form">
<h1 class="pt-3">Dokku Setup <small class="text-muted">{VERSION}</small></h1>
<div class="alert alert-warning small" role="alert">
<strong>Warning:</strong> The SSH key filled out here can grant root access to the server. Please complete the setup as soon as possible.
</div>
<div class="row">
<div class="col">
<h3>Admin Access</h3>
<div class="form-group">
<label for="key">Public SSH Keys</label><br />
<textarea class="form-control" name="keys" rows="5" id="key" placeholder="Begins with 'ssh-rsa', 'ssh-dss', 'ssh-ed25519', 'ecdsa-sha2-nistp256', 'ecdsa-sha2-nistp384', or 'ecdsa-sha2-nistp521'">{ADMIN_KEYS}</textarea>
<small class="form-text text-muted">Public keys allow users to ssh onto the server as the <code>dokku</code> user, as well as remotely execute Dokku commands. They are currently auto-populated from: <code>{AUTHORIZED_KEYS_LOCATION}</code>, and can be changed later via the <a href="http://dokku.viewdocs.io/dokku/deployment/user-management/" target="_blank"><code>dokku ssh-keys</code></a> plugin.</small>
</div>
</div>
</div>
<div class="row">
<div class="col">
<h3>Hostname Configuration</h3>
<div class="form-group">
<label for="hostname">Hostname</label>
<input class="form-control" type="text" id="hostname" name="hostname" value="{HOSTNAME}" placeholder="A hostname or ip address such as {HOSTNAME}" />
<small class="form-text text-muted">This will be used as the default host for all applications, and can be changed later via the <a href="http://dokku.viewdocs.io/dokku/configuration/domains/" target="_blank"><code>dokku domains:set-global</code></a> command.</small>
</div>
<div class="form-check">
<input class="form-check-input" type="checkbox" id="vhost" name="vhost" value="true">
<label class="form-check-label" for="vhost">Use virtualhost naming for apps</label>
<small class="form-text text-muted">When enabled, Nginx will be run on port 80 and proxy requests to apps based on hostname.</small>
<small class="form-text text-muted">When disabled, a specific port will be setup for each application on first deploy, and requests to that port will be proxied to the relevant app.</small>
</div>
<div class="alert alert-warning small mt-3 d-{UFW_DISPLAY}" role="alert">
<strong>Warning:</strong> UFW is active. To allow traffic to specific ports, run <code>sudo ufw allow PORT</code> for the port in question.
</div>
<div class="bd-callout bd-callout-info">
<h5>What will app URLs look like?</h5>
<pre><code id="example">http://hostname:port</code></pre>
</div>
</div>
</div>
<button type="button" onclick="setup()" class="btn btn-primary">Finish Setup</button> <span class="result"></span>
</form>
</div>
<div id="error-output"></div>
<script>
var $ = document.querySelector.bind(document)
function setup() {
if ($("#key").value.trim() == "") {
alert("Your admin public key cannot be blank.")
return
}
if ($("#hostname").value.trim() == "") {
alert("Your hostname cannot be blank.")
return
}
var data = new FormData($("#form"))
var inputs = [].slice.call(document.querySelectorAll("input, textarea, button"))
inputs.forEach(function (input) {
input.disabled = true
})
var result = $(".result")
fetch("/setup", {method: "POST", body: data})
.then(function(response) {
if (response.ok) {
return response.json()
} else {
throw new Error('Server returned error')
}
})
.then(function(response) {
result.classList.add("text-success");
result.textContent = "Success! Redirecting in 3 seconds. .."
setTimeout(function() {
window.location.href = "http://dokku.viewdocs.io/dokku~{VERSION}/deployment/application-deployment/";
}, 3000);
})
.catch(function (error) {
result.classList.add("text-danger");
result.textContent = "Could not send the request"
})
}
function update() {
if ($("#vhost").matches(":checked") && $("#hostname").value.match(/^(\d{1,3}\.){3}\d{1,3}$/)) {
alert("In order to use virtualhost naming, the hostname must not be an IP but a valid domain name.")
$("#vhost").checked = false;
}
if ($("#vhost").matches(':checked')) {
$("#example").textContent = "http://<app-name>."+$("#hostname").value
} else {
$("#example").textContent = "http://"+$("#hostname").value+":<app-port>"
}
}
$("#vhost").addEventListener("change", update);
$("#hostname").addEventListener("input", update);
update();
</script>
</body>
</html>
"""
if __name__ == "__main__":
main()
| 35.341686
| 418
| 0.594135
|
794c8c145ef375791bc4971551a65862414b5af7
| 120
|
py
|
Python
|
hackerrank/contest/wfr2016_a.py
|
knuu/competitive-programming
|
16bc68fdaedd6f96ae24310d697585ca8836ab6e
|
[
"MIT"
] | 1
|
2018-11-12T15:18:55.000Z
|
2018-11-12T15:18:55.000Z
|
hackerrank/contest/wfr2016_a.py
|
knuu/competitive-programming
|
16bc68fdaedd6f96ae24310d697585ca8836ab6e
|
[
"MIT"
] | null | null | null |
hackerrank/contest/wfr2016_a.py
|
knuu/competitive-programming
|
16bc68fdaedd6f96ae24310d697585ca8836ab6e
|
[
"MIT"
] | null | null | null |
def bit_positions(n):
    """Return the 1-based positions of the set bits of ``n``, highest first.

    E.g. 13 (0b1101) -> [4, 3, 1]; 0 -> [].
    """
    positions = []
    i = 0
    while n >> i:
        if (n >> i) & 1:
            positions.append(i + 1)
        i += 1
    return positions[::-1]


def main():
    """Read one integer from stdin and print its set-bit positions."""
    n = int(input())
    print(*bit_positions(n))


if __name__ == "__main__":
    # Guarded entry point: the original ran all logic at import time,
    # which made the script untestable and unsafe to import.
    main()
| 13.333333
| 23
| 0.4
|
794c8c21c46d0edd2c83d50f41f9005f72c75342
| 10,389
|
py
|
Python
|
aws_topology/tests/conftest.py
|
StackVista/stackstate-agent-integrations
|
93f07da060ad09a36361cd8f3a037a531e9bb74e
|
[
"BSD-3-Clause"
] | 2
|
2020-03-10T13:21:37.000Z
|
2021-04-01T07:52:16.000Z
|
aws_topology/tests/conftest.py
|
StackVista/stackstate-agent-integrations
|
93f07da060ad09a36361cd8f3a037a531e9bb74e
|
[
"BSD-3-Clause"
] | 33
|
2020-02-05T16:18:32.000Z
|
2022-03-21T14:08:04.000Z
|
aws_topology/tests/conftest.py
|
StackVista/stackstate-agent-integrations
|
93f07da060ad09a36361cd8f3a037a531e9bb74e
|
[
"BSD-3-Clause"
] | 7
|
2020-03-10T13:21:39.000Z
|
2021-03-11T07:16:44.000Z
|
# (C) StackState 2021
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
import unittest
import os
import json
from mock import patch
from stackstate_checks.base.stubs import topology as top, aggregator
from stackstate_checks.aws_topology import AwsTopologyCheck, InitConfig
from stackstate_checks.base import AgentCheck
import botocore
import hashlib
from datetime import datetime, timedelta
import pytz
# Shared fake AWS identifiers used throughout this test suite.
REGION = "test-region"
KEY_ID = "1234"
ACCESS_KEY = "5678"
ACCOUNT_ID = "123456789012"
WRONG_ACCOUNT_ID = "987654321012"
ROLE = "some_role_with_many_characters"
TOKEN = "ABCDE"

# Canned STS responses for the authentication calls the check performs.
API_RESULTS = {
    "AssumeRole": {"Credentials": {"AccessKeyId": KEY_ID, "SecretAccessKey": ACCESS_KEY, "SessionToken": TOKEN}},
    "GetCallerIdentity": {
        "Account": ACCOUNT_ID,
    },
}
@pytest.fixture(scope="session")
def sts_environment():
    """Minimal check configuration for a `checksdev` dev environment."""
    # This conf instance is used when running `checksdev env start mycheck myenv`.
    # The start command places this as a `conf.yaml` in the `conf.d/mycheck/` directory.
    # If you want to run an environment this object can not be empty.
    return {
        "role_arn": "arn:aws:iam::123456789012:role/RoleName",
        "regions": ["eu-west-1"],
    }
@pytest.fixture(scope="class")
def instance(request):
    """Attach a minimal check-instance config to the requesting test class
    as ``cls.instance``."""
    cfg = {
        "role_arn": "arn:aws:iam::123456789012:role/RoleName",
        "regions": ["eu-west-1"],
    }
    request.cls.instance = cfg
@pytest.fixture(scope="class")
def init_config(request):
    """Attach an init-config with a real external_id as ``cls.config``."""
    cfg = {"aws_access_key_id": "abc", "aws_secret_access_key": "cde", "external_id": "randomvalue"}
    request.cls.config = cfg
@pytest.fixture(scope="class")
def init_config_override(request):
    """Attach an init-config that disables external-id checking (test-only
    escape hatch) as ``cls.config``."""
    cfg = {
        "aws_access_key_id": "abc",
        "aws_secret_access_key": "cde",
        "external_id": "disable_external_id_this_is_unsafe",
    }
    request.cls.config = cfg
def set_not_authorized(value):
    """Mark a test method with the API operations that should raise
    access-denied errors (read back by ``BaseApiTest.setUp``)."""
    def mark(test_func):
        test_func.not_authorized = value
        return test_func
    return mark
def set_cloudtrail_event(value):
    """Mark a test method with the CloudTrail event fixture it replays."""
    def mark(test_func):
        test_func.cloudtrail_event = value
        return test_func
    return mark
def set_eventbridge_event(value):
    """Mark a test method with the EventBridge event fixture it replays."""
    def mark(test_func):
        test_func.eventbridge_event = value
        return test_func
    return mark
def set_filter(value):
    """Mark a test method with the API component filter it should use."""
    def mark(test_func):
        test_func.filter = value
        return test_func
    return mark
def use_subdirectory(value):
    """Mark a test method with the fixture subdirectory to load from."""
    def mark(test_func):
        test_func.subdirectory = value
        return test_func
    return mark
def get_params_hash(region, data):
    """Deterministic 7-character digest of region + canonical-JSON call
    params; used to name per-call fixture files."""
    canonical = json.dumps(data, sort_keys=True, default=str)
    digest = hashlib.md5((region + canonical).encode("utf-8")).hexdigest()
    return digest[:7]
def relative_path(path):
    """Resolve *path* against this module's directory."""
    script_dir = os.path.dirname(__file__)
    return os.path.join(script_dir, path)


def resource(path):
    """Return the parsed JSON fixture stored at *path* (module-relative)."""
    with open(relative_path(path)) as f:
        x = json.load(f)
    return x


def get_bytes_from_file(path):
    """Return the raw bytes of the fixture at *path* (module-relative)."""
    # Context manager closes the handle deterministically — the original
    # `open(...).read()` leaked it until garbage collection.
    with open(relative_path(path), "rb") as f:
        return f.read()
def use_gz(value):
    """Mark a test method as using gzip-compressed fixture payloads."""
    def mark(test_func):
        test_func.gz = value
        return test_func
    return mark
def set_log_bucket_name(value):
    """Mark a test method with the log bucket name it expects."""
    def mark(test_func):
        test_func.log_bucket_name = value
        return test_func
    return mark
def wrapper(api, not_authorized, subdirectory, event_name=None, eventbridge_event_name=None):
    """Build a replacement for botocore's ``BaseClient._make_api_call`` that
    serves canned JSON fixtures instead of calling AWS.

    Dispatch order: fake STS assume_role -> optional CloudTrail /
    EventBridge event simulation -> simulated access-denied errors for
    operations in ``not_authorized`` -> per-call fixture file lookup.
    """
    def mock_boto_calls(self, *args, **kwargs):
        # args[0] is the CamelCase operation name; normalize to snake_case.
        operation_name = botocore.xform_name(args[0])
        if operation_name == "assume_role":
            return {"Credentials": {"AccessKeyId": "KEY_ID", "SecretAccessKey": "ACCESS_KEY", "SessionToken": "TOKEN"}}
        if event_name:
            if operation_name == "lookup_events":
                # Replay the named CloudTrail fixture, stamped slightly in
                # the future so the check treats it as new.
                res = resource("json/" + api + "/cloudtrail/" + event_name + ".json")
                dt = datetime.utcnow() + timedelta(hours=3)
                res["eventTime"] = dt.strftime("%Y-%m-%dT%H:%M:%SZ")
                msg = {"Events": [{"CloudTrailEvent": json.dumps(res)}]}
                return msg
        if eventbridge_event_name:
            # Simulate EventBridge-via-S3 delivery: an empty CloudTrail
            # lookup, then a versioned bucket listing one gz object whose
            # body is the named fixture.
            if operation_name == "lookup_events":
                return {}
            if operation_name == "get_bucket_versioning":
                return {"Status": "Enabled"}
            if operation_name == "list_objects_v2":
                return {
                    "Contents": [
                        {
                            "Key": "AWSLogs/123456789012/EventBridge/eu-west-1"
                            + "/2021/06/11/05/stackstate-eventbridge-stream-2-2021-06-11-05-18-05-"
                            + "b7d5fff3-928a-4e63-939b-1a32662b6a63.gz"
                        }
                    ]
                }
            if operation_name == "get_object":
                res = resource("json/" + api + "/cloudtrail/" + eventbridge_event_name + ".json")
                return {"Body": json.dumps(res)}
        if operation_name in not_authorized:
            # Some APIs return a different error code when there is no permission
            # But there are no docs on which ones do. Here is an array of some known APIs
            if api in ["stepfunctions", "firehose"]:
                error_code = "AccessDeniedException"
            elif api == "ec2":
                error_code = "UnauthorizedOperation"
            elif api == "sns":
                error_code = "AuthorizationError"
            else:
                error_code = "AccessDenied"
            raise botocore.exceptions.ClientError({"Error": {"Code": error_code}}, operation_name)
        # Default path: load the fixture file keyed by operation name and a
        # hash of the call parameters (see get_params_hash).
        apidir = api
        if apidir is None:
            apidir = self._service_model.service_name
        directory = os.path.join("json", apidir, subdirectory)
        file_name = "{}/{}_{}.json".format(directory, operation_name, get_params_hash(self.meta.region_name, args))
        try:
            result = resource(file_name)
            # print('file: ', file_name)
            # print('args: ', json.dumps(args, indent=2, default=str))
            # print('meta: ', json.dumps(result["ResponseMetadata"]["Parameters"], indent=2, default=str))
        except Exception:
            error = "API response file not found for operation: {}\n".format(operation_name)
            error += "Parameters:\n{}\n".format(json.dumps(args[1], indent=2, default=str))
            error += "File missing: {}".format(file_name)
            raise Exception(error)
        # If an error code is included in the response metadata, raise this instead
        if "Error" in result.get("ResponseMetadata", {}):
            raise botocore.exceptions.ClientError({"Error": result["ResponseMetadata"]["Error"]}, operation_name)
        else:
            return result

    return mock_boto_calls
class BaseApiTest(unittest.TestCase):
    """Shared scaffolding for per-AWS-API check tests.

    ``setUp`` patches botocore so every API call is served from JSON
    fixtures (see ``wrapper``), builds an AwsTopologyCheck instance for the
    subclass's api/region/account, and honours per-test markers set by the
    ``set_*``/``use_*`` decorators above.
    """

    CHECK_NAME = "aws_topology"
    SERVICE_CHECK_NAME = "aws_topology"

    def get_api(self):
        # Subclasses must return the API/fixture name under test.
        raise NotImplementedError

    def get_account_id(self):
        return "123456789012"

    def get_region(self):
        # May return a single region or a list; setUp normalizes to a list.
        return "eu-west-1"

    @staticmethod
    def get_filter():
        return ""

    def setUp(self):
        """
        Initialize and patch the check: read the per-test decorator markers,
        mock botocore, and construct the check instance.
        """
        # Markers are attributes set on the bound test method by the
        # decorator factories (set_not_authorized, set_cloudtrail_event, ...).
        method = getattr(self, self._testMethodName)
        not_authorized = []
        if hasattr(method, "not_authorized"):
            not_authorized = method.not_authorized
        cloudtrail_event = None
        if hasattr(method, "cloudtrail_event"):
            cloudtrail_event = method.cloudtrail_event
        eventbridge_event = None
        if hasattr(method, "eventbridge_event"):
            eventbridge_event = method.eventbridge_event
        filter = ""
        if hasattr(method, "filter"):
            filter = method.filter
        subdirectory = ""
        if hasattr(method, "subdirectory"):
            subdirectory = method.subdirectory
        self.patcher = patch("botocore.client.BaseClient._make_api_call", autospec=True)
        self.mock_object = self.patcher.start()
        top.reset()
        aggregator.reset()
        init_config = InitConfig(
            {
                "aws_access_key_id": "some_key",
                "aws_secret_access_key": "some_secret",
                "external_id": "disable_external_id_this_is_unsafe",
            }
        )
        regions = self.get_region()
        if not isinstance(regions, list):
            regions = [regions]
        instance = {
            "role_arn": "arn:aws:iam::{}:role/RoleName".format(self.get_account_id()),
            "regions": regions
        }
        api = self.get_api()
        apis = None
        if api:
            if filter:
                apis = [api + "|" + filter]
            else:
                apis = [api]
        if cloudtrail_event:
            # Event replay tests run no topology APIs directly.
            apis = []
        instance.update({"apis_to_run": apis})
        self.check = AwsTopologyCheck(self.CHECK_NAME, InitConfig(init_config), [instance])
        # Fixed timestamp so "time since last full topology" is deterministic.
        self.check.last_full_topology = datetime(2021, 5, 1, 0, 0, 0).replace(tzinfo=pytz.utc)

        def ignore_callback(self, *args, **kwargs):
            return

        # Disable the parts of the check not under test.
        self.check.get_flowlog_update = ignore_callback
        if cloudtrail_event is None and eventbridge_event is None:
            self.check.get_topology_update = ignore_callback
        self.mock_object.side_effect = wrapper(
            api, not_authorized, subdirectory, event_name=cloudtrail_event, eventbridge_event_name=eventbridge_event
        )
        self.components_checked = 0
        self.relations_checked = 0

    def tearDown(self):
        self.patcher.stop()

    def assert_executed_ok(self):
        """Assert the check's execute service check reported OK."""
        service_checks = aggregator.service_checks(self.check.SERVICE_CHECK_EXECUTE_NAME)
        self.assertGreater(len(service_checks), 0)
        self.assertEqual(service_checks[0].status, AgentCheck.OK, service_checks[0].message)

    def assert_updated_ok(self):
        """Assert the check's update service check reported OK."""
        service_checks = aggregator.service_checks(self.check.SERVICE_CHECK_UPDATE_NAME)
        self.assertGreater(len(service_checks), 0)
        self.assertEqual(service_checks[0].status, AgentCheck.OK, service_checks[0].message)

    def assert_location_info(self, component):
        """Assert a component carries the expected account/region location."""
        self.assertEqual(component["data"]["Location"]["AwsAccount"], self.get_account_id())
        region = self.get_region()
        # Route53 is a global service surfaced in us-east-1.
        if component["type"] == "aws.route53.domain" or component["type"] == "aws.route53.hostedzone":
            region = "us-east-1"
        self.assertEqual(component["data"]["Location"]["AwsRegion"], region)
| 33.512903
| 119
| 0.618731
|
794c8c54ade42fa21ffe9dd7582d657aad49c03c
| 12,552
|
py
|
Python
|
saleor/graphql/shop/types.py
|
Tobos20/saleor
|
ecb43299360d282733d36df9a9b1488a3d8ed7dd
|
[
"CC-BY-4.0"
] | null | null | null |
saleor/graphql/shop/types.py
|
Tobos20/saleor
|
ecb43299360d282733d36df9a9b1488a3d8ed7dd
|
[
"CC-BY-4.0"
] | null | null | null |
saleor/graphql/shop/types.py
|
Tobos20/saleor
|
ecb43299360d282733d36df9a9b1488a3d8ed7dd
|
[
"CC-BY-4.0"
] | null | null | null |
from typing import Optional
import graphene
from django.conf import settings
from django.utils import translation
from django_countries import countries
from django_prices_vatlayer.models import VAT
from phonenumbers import COUNTRY_CODE_TO_REGION_CODE
from ...account import models as account_models
from ...core.permissions import SitePermissions, get_permissions
from ...core.utils import get_client_ip, get_country_by_ip
from ...plugins.manager import get_plugins_manager
from ...site import models as site_models
from ..account.types import Address, StaffNotificationRecipient
from ..channel import ChannelContext
from ..checkout.types import PaymentGateway
from ..core.enums import WeightUnitsEnum
from ..core.types.common import CountryDisplay, LanguageDisplay, Permission
from ..core.utils import str_to_enum
from ..decorators import permission_required
from ..menu.dataloaders import MenuByIdLoader
from ..menu.types import Menu
from ..translations.enums import LanguageCodeEnum
from ..translations.fields import TranslationField
from ..translations.resolvers import resolve_translation
from ..translations.types import ShopTranslation
from ..utils import format_permissions_for_display
from .enums import AuthorizationKeyType
class Navigation(graphene.ObjectType):
    # Pair of site-wide menus; populated by Shop.resolve_navigation.
    main = graphene.Field(Menu, description="Main navigation bar.")
    secondary = graphene.Field(Menu, description="Secondary navigation bar.")

    class Meta:
        description = "Represents shop's navigation menus."
class AuthorizationKey(graphene.ObjectType):
    # OAuth client credentials entry exposed via Shop.authorization_keys.
    name = AuthorizationKeyType(
        description="Name of the authorization backend.", required=True
    )
    key = graphene.String(description="Authorization key (client ID).", required=True)
class Domain(graphene.ObjectType):
    # Site domain info assembled in Shop.resolve_domain.
    host = graphene.String(description="The host name of the domain.", required=True)
    ssl_enabled = graphene.Boolean(
        description="Inform if SSL is enabled.", required=True
    )
    url = graphene.String(description="Shop's absolute URL.", required=True)

    class Meta:
        description = "Represents shop's domain."
class Geolocalization(graphene.ObjectType):
    # Country derived from the requester's IP (see Shop.resolve_geolocalization).
    country = graphene.Field(
        CountryDisplay, description="Country of the user acquired by his IP address."
    )

    class Meta:
        description = "Represents customers's geolocalization data."
class Shop(graphene.ObjectType):
    # --- Field declarations -------------------------------------------------
    available_payment_gateways = graphene.List(
        graphene.NonNull(PaymentGateway),
        currency=graphene.Argument(
            graphene.String,
            description="A currency for which gateways will be returned.",
            required=False,
        ),
        description="List of available payment gateways.",
        required=True,
    )
    geolocalization = graphene.Field(
        Geolocalization, description="Customer's geolocalization data."
    )
    authorization_keys = graphene.List(
        AuthorizationKey,
        description=(
            "List of configured authorization keys. Authorization keys are used to "
            "enable third-party OAuth authorization (currently Facebook or Google)."
        ),
        required=True,
    )
    countries = graphene.List(
        graphene.NonNull(CountryDisplay),
        language_code=graphene.Argument(
            LanguageCodeEnum,
            description="A language code to return the translation for.",
        ),
        description="List of countries available in the shop.",
        required=True,
    )
    currencies = graphene.List(
        graphene.String,
        description="List of available currencies.",
        required=True,
        deprecation_reason="This field will be removed in Saleor 3.0",
    )
    default_currency = graphene.String(
        description="Shop's default currency.",
        required=True,
        deprecation_reason="This field will be removed in Saleor 3.0",
    )
    default_country = graphene.Field(
        CountryDisplay, description="Shop's default country."
    )
    default_mail_sender_name = graphene.String(
        description="Default shop's email sender's name."
    )
    default_mail_sender_address = graphene.String(
        description="Default shop's email sender's address."
    )
    description = graphene.String(description="Shop's description.")
    domain = graphene.Field(Domain, required=True, description="Shop's domain data.")
    languages = graphene.List(
        LanguageDisplay,
        description="List of the shops's supported languages.",
        required=True,
    )
    name = graphene.String(description="Shop's name.", required=True)
    navigation = graphene.Field(
        Navigation,
        description="Shop's navigation.",
        deprecation_reason="Fetch menus using the `menu` query with `slug` parameter.",
    )
    permissions = graphene.List(
        Permission, description="List of available permissions.", required=True
    )
    phone_prefixes = graphene.List(
        graphene.String, description="List of possible phone prefixes.", required=True
    )
    header_text = graphene.String(description="Header text.")
    include_taxes_in_prices = graphene.Boolean(
        description="Include taxes in prices.", required=True
    )
    display_gross_prices = graphene.Boolean(
        description="Display prices with tax in store.", required=True
    )
    charge_taxes_on_shipping = graphene.Boolean(
        description="Charge taxes on shipping.", required=True
    )
    track_inventory_by_default = graphene.Boolean(
        description="Enable inventory tracking."
    )
    default_weight_unit = WeightUnitsEnum(description="Default weight unit.")
    translation = TranslationField(ShopTranslation, type_name="shop", resolver=None)
    automatic_fulfillment_digital_products = graphene.Boolean(
        description="Enable automatic fulfillment for all digital products."
    )
    default_digital_max_downloads = graphene.Int(
        description="Default number of max downloads per digital content URL."
    )
    default_digital_url_valid_days = graphene.Int(
        description="Default number of days which digital content URL will be valid."
    )
    company_address = graphene.Field(
        Address, description="Company address.", required=False
    )
    customer_set_password_url = graphene.String(
        description="URL of a view where customers can set their password.",
        required=False,
    )
    staff_notification_recipients = graphene.List(
        StaffNotificationRecipient,
        description="List of staff notification recipients.",
        required=False,
    )

    class Meta:
        description = (
            "Represents a shop resource containing general shop data and configuration."
        )

    # --- Resolvers ----------------------------------------------------------
    # Most read from info.context.site.settings; settings-management fields
    # are gated behind SitePermissions.MANAGE_SETTINGS.

    @staticmethod
    def resolve_available_payment_gateways(_, _info, currency: Optional[str] = None):
        return get_plugins_manager().list_payment_gateways(currency=currency)

    @staticmethod
    @permission_required(SitePermissions.MANAGE_SETTINGS)
    def resolve_authorization_keys(_, _info):
        return site_models.AuthorizationKey.objects.all()

    @staticmethod
    def resolve_countries(_, _info, language_code=None):
        taxes = {vat.country_code: vat for vat in VAT.objects.all()}
        # Country names are localized via Django's translation override.
        with translation.override(language_code):
            return [
                CountryDisplay(
                    code=country[0], country=country[1], vat=taxes.get(country[0])
                )
                for country in countries
            ]

    @staticmethod
    def resolve_domain(_, info):
        site = info.context.site
        return Domain(
            host=site.domain,
            ssl_enabled=settings.ENABLE_SSL,
            url=info.context.build_absolute_uri("/"),
        )

    @staticmethod
    def resolve_geolocalization(_, info):
        client_ip = get_client_ip(info.context)
        country = get_country_by_ip(client_ip)
        if country:
            return Geolocalization(
                country=CountryDisplay(code=country.code, country=country.name)
            )
        return Geolocalization(country=None)

    @staticmethod
    def resolve_description(_, info):
        return info.context.site.settings.description

    @staticmethod
    def resolve_languages(_, _info):
        return [
            LanguageDisplay(
                code=LanguageCodeEnum[str_to_enum(language[0])], language=language[1]
            )
            for language in settings.LANGUAGES
        ]

    @staticmethod
    def resolve_name(_, info):
        return info.context.site.name

    @staticmethod
    def resolve_navigation(_, info):
        # Menus are loaded lazily via dataloaders and wrapped channel-less.
        site_settings = info.context.site.settings
        main = None
        if site_settings.top_menu_id:
            main = (
                MenuByIdLoader(info.context)
                .load(site_settings.top_menu_id)
                .then(lambda menu: ChannelContext(node=menu, channel_slug=None))
            )
        secondary = None
        if site_settings.bottom_menu_id:
            secondary = (
                MenuByIdLoader(info.context)
                .load(site_settings.bottom_menu_id)
                .then(lambda menu: ChannelContext(node=menu, channel_slug=None))
            )
        return Navigation(main=main, secondary=secondary)

    @staticmethod
    def resolve_permissions(_, _info):
        permissions = get_permissions()
        return format_permissions_for_display(permissions)

    @staticmethod
    def resolve_phone_prefixes(_, _info):
        return list(COUNTRY_CODE_TO_REGION_CODE.keys())

    @staticmethod
    def resolve_header_text(_, info):
        return info.context.site.settings.header_text

    @staticmethod
    def resolve_include_taxes_in_prices(_, info):
        return info.context.site.settings.include_taxes_in_prices

    @staticmethod
    def resolve_display_gross_prices(_, info):
        return info.context.site.settings.display_gross_prices

    @staticmethod
    def resolve_charge_taxes_on_shipping(_, info):
        return info.context.site.settings.charge_taxes_on_shipping

    @staticmethod
    def resolve_track_inventory_by_default(_, info):
        return info.context.site.settings.track_inventory_by_default

    @staticmethod
    def resolve_default_weight_unit(_, info):
        return info.context.site.settings.default_weight_unit

    @staticmethod
    def resolve_default_country(_, _info):
        default_country_code = settings.DEFAULT_COUNTRY
        default_country_name = countries.countries.get(default_country_code)
        if default_country_name:
            vat = VAT.objects.filter(country_code=default_country_code).first()
            default_country = CountryDisplay(
                code=default_country_code, country=default_country_name, vat=vat
            )
        else:
            default_country = None
        return default_country

    @staticmethod
    @permission_required(SitePermissions.MANAGE_SETTINGS)
    def resolve_default_mail_sender_name(_, info):
        return info.context.site.settings.default_mail_sender_name

    @staticmethod
    @permission_required(SitePermissions.MANAGE_SETTINGS)
    def resolve_default_mail_sender_address(_, info):
        return info.context.site.settings.default_mail_sender_address

    @staticmethod
    def resolve_company_address(_, info):
        return info.context.site.settings.company_address

    @staticmethod
    def resolve_customer_set_password_url(_, info):
        return info.context.site.settings.customer_set_password_url

    @staticmethod
    def resolve_translation(_, info, language_code):
        return resolve_translation(info.context.site.settings, info, language_code)

    @staticmethod
    @permission_required(SitePermissions.MANAGE_SETTINGS)
    def resolve_automatic_fulfillment_digital_products(_, info):
        site_settings = info.context.site.settings
        return site_settings.automatic_fulfillment_digital_products

    @staticmethod
    @permission_required(SitePermissions.MANAGE_SETTINGS)
    def resolve_default_digital_max_downloads(_, info):
        return info.context.site.settings.default_digital_max_downloads

    @staticmethod
    @permission_required(SitePermissions.MANAGE_SETTINGS)
    def resolve_default_digital_url_valid_days(_, info):
        return info.context.site.settings.default_digital_url_valid_days

    @staticmethod
    @permission_required(SitePermissions.MANAGE_SETTINGS)
    def resolve_staff_notification_recipients(_, info):
        return account_models.StaffNotificationRecipient.objects.all()
| 36.488372
| 88
| 0.704908
|
794c8d7e10e60b227fde22fbc2285c068044edec
| 1,727
|
py
|
Python
|
daemons/useq_nextcloud_monitor.py
|
UMCUGenetics/USEQ_tools
|
fb003c34b965c9648a6c4dc960235f0fee526ac0
|
[
"MIT"
] | null | null | null |
daemons/useq_nextcloud_monitor.py
|
UMCUGenetics/USEQ_tools
|
fb003c34b965c9648a6c4dc960235f0fee526ac0
|
[
"MIT"
] | 2
|
2021-03-31T20:12:34.000Z
|
2021-11-22T14:33:22.000Z
|
daemons/useq_nextcloud_monitor.py
|
UMCUGenetics/USEQ_tools
|
fb003c34b965c9648a6c4dc960235f0fee526ac0
|
[
"MIT"
] | null | null | null |
from config import NEXTCLOUD_HOST,NEXTCLOUD_WEBDAV_ROOT,NEXTCLOUD_RAW_DIR,NEXTCLOUD_PROCESSED_DIR,NEXTCLOUD_LOG_DIR,MAIL_SENDER, NEXTCLOUD_USER, NEXTCLOUD_PW, NEXTCLOUD_STORAGE, NEXTCLOUD_MAX,MAIL_SENDER,MAIL_ADMINS,NEXTCLOUD_MANUAL_DIR
from modules.useq_nextcloud import NextcloudUtil
from modules.useq_template import TEMPLATE_PATH,TEMPLATE_ENVIRONMENT,renderTemplate
from modules.useq_mail import sendMail
from datetime import datetime
def convertFileSize(size, precision=2):
    """Format *size* (bytes) as a human-readable string, e.g. 2048 -> '2.00KB'.

    The comparison is strict (> 1024), so exactly 1024 stays in the current
    unit — behavior preserved from the original implementation.
    """
    units = ['B', 'KB', 'MB', 'GB', 'TB']
    unit_idx = 0
    while size > 1024 and unit_idx < 4:
        size = size / 1024.0
        unit_idx += 1
    return "%.*f%s" % (precision, size, units[unit_idx])
def checkUsage():
    """List the files in the configured Nextcloud directory, total their
    sizes, and mail an HTML overview to the admins.

    Mutates each entry's 'size' in place from bytes to a formatted string
    before rendering. Uses the module-global ``nextcloud_util`` set by run().
    """
    files = nextcloud_util.fileList()
    # NOTE(review): files_to_delete is never used — dead variable, or a
    # planned cleanup feature; confirm before removing.
    files_to_delete =[]
    total_size = 0
    for file in files:
        total_size += files[file]['size']
        files[file]['size'] = convertFileSize(files[file]['size'])

    usage = convertFileSize(total_size)
    subject = 'Nextcloud overview of directory {0}'.format(nextcloud_util.run_dir)
    data = {
        'total_usage' : usage,
        'files' : files,
        'dir' : nextcloud_util.run_dir
    }
    content = renderTemplate('nextcloud_overview.html', data)
    sendMail(subject,content,MAIL_SENDER,MAIL_ADMINS)
def run():
    """Entry point: report usage for both the raw-data and manual-upload
    Nextcloud directories.

    Configures the shared NextcloudUtil once per directory and calls
    checkUsage() for each; checkUsage reads it via the module global.
    """
    global nextcloud_util

    # Set up nextcloud against the raw-runs directory first.
    nextcloud_util = NextcloudUtil()
    nextcloud_util.setHostname( NEXTCLOUD_HOST )
    nextcloud_util.setup( NEXTCLOUD_USER, NEXTCLOUD_PW, NEXTCLOUD_WEBDAV_ROOT,NEXTCLOUD_RAW_DIR,MAIL_SENDER )
    checkUsage()
    # Re-point the same util at the manual-uploads directory.
    nextcloud_util.setup( NEXTCLOUD_USER, NEXTCLOUD_PW, NEXTCLOUD_WEBDAV_ROOT,NEXTCLOUD_MANUAL_DIR,MAIL_SENDER )
    checkUsage()
| 36.744681
| 237
| 0.749855
|
794c8dec9aad8667eba821523e36efec533754e1
| 5,533
|
py
|
Python
|
scipy_central/filestorage/models.py
|
wqshi/test
|
63dc0c684ec749cd03e9c071176f30f439188f14
|
[
"BSD-3-Clause"
] | null | null | null |
scipy_central/filestorage/models.py
|
wqshi/test
|
63dc0c684ec749cd03e9c071176f30f439188f14
|
[
"BSD-3-Clause"
] | null | null | null |
scipy_central/filestorage/models.py
|
wqshi/test
|
63dc0c684ec749cd03e9c071176f30f439188f14
|
[
"BSD-3-Clause"
] | null | null | null |
from django.db import models
from django.conf import settings
from scipy_central import utils
from django.utils.datastructures import SortedDict
from dvcs_wrapper import DVCSError, DVCSRepo
# Python imports
import os
import logging
import shutil
# DVCS storage location and backend, taken from the project settings.
storage_dir = settings.SPC['storage_dir']
backend = settings.SPC['revisioning_backend']
revisioning_executable = settings.SPC['revisioning_executable']

logger = logging.getLogger('scipycentral')
logger.debug('Initializing filestorage::models.py')
class FileSet(models.Model):
    """
    Every file-based submission is stored under revision control. This class
    defines where those files are stored, creates that storage, and has
    class methods to add files to the storage location.
    """
    # Where will the files be stored. Path name should always end with a slash;
    # it is joined onto the configured storage_dir.
    repo_path = models.CharField(max_length=500)
    # Timestamp set once on first save.
    created = models.DateTimeField(auto_now_add=True)
def save(self, *args, **kwargs):
    """ Override the model's saving function to ensure the repo dir is
    created. """
    # The directory must exist before any DVCS operation targets it.
    utils.ensuredir(os.path.join(storage_dir, self.repo_path))
    super(FileSet, self).save(*args, **kwargs)
def create_empty(self):
    """
    Create an empty repo (``init``) and returns it.
    """
    repo = DVCSRepo(backend, os.path.join(storage_dir, self.repo_path), do_init=True,
                    dvcs_executable=revisioning_executable)
    # Save the location for next time: cache the executable path the repo
    # discovered back into this module's global.
    globals()['revisioning_executable'] = repo.executable
    return repo
def add_file_from_string(self, filename, list_strings, commit_msg='',
user=None):
"""
Add a ``filename`` to the repo using the list of strings to create
the file. A commit will be written to the repo is ``commit_msg`` is not
empty.
"""
fname = os.path.join(storage_dir, self.repo_path, filename)
f = open(fname, 'w')
f.writelines(list_strings)
f.close()
repo = DVCSRepo(backend, os.path.join(storage_dir, self.repo_path),
dvcs_executable=revisioning_executable)
# Save the location for next time
globals()['revisioning_executable'] = repo.executable
# Only add this file
try:
repo.add([fname])
except DVCSError as e:
# Happens if a file with the same name already exists in the repo
if e.value == 'Could not add one or more files to repository.':
pass
else:
raise
if commit_msg:
repo.commit(commit_msg, user=user)
def add_file(self, pattern, commit_msg='', user=None, repo=None):
"""
Add a single file, or a file ``pattern``, to the repo.
A commit will be written to the repo if ``commit_msg`` is not empty.
"""
if repo is None:
repo = DVCSRepo(backend, os.path.join(storage_dir, self.repo_path),
do_init=False,
dvcs_executable=revisioning_executable)
try:
repo.add([pattern])
except DVCSError as e:
logger.error('DVCS error: %s' % e.original_message)
if commit_msg:
repo.commit(commit_msg, user=user)
def get_hash(self):
"""
Returns the current repo hash for this fileset
"""
repo = DVCSRepo(backend, os.path.join(storage_dir, self.repo_path),
do_init=False,
dvcs_executable=revisioning_executable)
return repo.get_revision_info()[0:60]
def get_repo(self):
"""
Returns the DVCS repo object
"""
return DVCSRepo(backend, os.path.join(storage_dir, self.repo_path),
dvcs_executable=revisioning_executable)
def checkout_revision(self, hash_id):
""" Set the repo state to the revision given by ``hash_id``
Equivalent, for e.g., to ``hg checkout 28ed0c6faa19`` for that hash_id.
"""
repo = DVCSRepo(backend, os.path.join(storage_dir, self.repo_path),
do_init=False,
dvcs_executable=revisioning_executable)
hash_str = repo.check_out(hash_id)
if hash_str==hash_id:
return repo
else:
return None
def list_iterator(self):
"""
COMMENT NOT TRUE YET:
Returns a list of all files in a repo. For example, if the repo has:
/dir1/abc.png
/dir1/def.png
/dir2/ <-- empty dir
/dir3/dir4/frg.png
/dir3/tyr.png
ghw.png
yqr.png
This function will return a Django ``SortedDict`` data structure:
[ {'dir1': ['abc.png', 'def.png'],
'dir2': [],
'dir3': [{'dir4': 'frg.png'}, 'tyr.png'],
},
'ghw.png',
'yqr.png'
]
"""
base_dir = os.path.join(storage_dir, self.repo_path)
for path, dirs, files in os.walk(base_dir):
dirname = os.path.split(path)[1]
if dirname in settings.SPC['common_rcs_dirs']:
for entry in dirs[:]:
dirs.remove(entry)
else:
for fname in files:
yield os.path.relpath(os.path.join(path, fname), base_dir)
def __unicode__(self):
return '<storage_dir>/' + self.repo_path
| 33.331325
| 89
| 0.586662
|
794c8fc6ee36d1e8e7ab648e588cd6acc7d8607b
| 9,001
|
py
|
Python
|
gui/qt/utxo_list.py
|
acidsploit/Electron-Cash
|
5f6799c97bc25dcb869720a61e8ba8d1b900db17
|
[
"MIT"
] | null | null | null |
gui/qt/utxo_list.py
|
acidsploit/Electron-Cash
|
5f6799c97bc25dcb869720a61e8ba8d1b900db17
|
[
"MIT"
] | null | null | null |
gui/qt/utxo_list.py
|
acidsploit/Electron-Cash
|
5f6799c97bc25dcb869720a61e8ba8d1b900db17
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from .util import *
from electroncash.i18n import _
class UTXOList(MyTreeWidget):
    """Tree widget listing the wallet's unspent transaction outputs (coins),
    with a context menu for spending and for freezing/unfreezing coins and
    their addresses."""
    filter_columns = [0, 2]  # Address, Label
    def __init__(self, parent=None):
        MyTreeWidget.__init__(self, parent, self.create_menu, [ _('Address'), _('Label'), _('Amount'), _('Height'), _('Output point')], 1)
        self.setSelectionMode(QAbstractItemView.ExtendedSelection)
        self.setSortingEnabled(True)
        # force attributes to always be defined, even if None, at construction.
        self.wallet = self.parent.wallet if hasattr(self.parent, 'wallet') else None
        self.utxos = list()
    def get_name(self, x):
        # A coin's "name" is "txid:prevout_n" -- used as a stable key for
        # selection tracking and menu actions.
        return x.get('prevout_hash') + ":%d"%x.get('prevout_n')
    @rate_limited(1.0) # performance tweak -- limit updates to no more than once per second
    def update(self):
        if self.wallet and (not self.wallet.thread or not self.wallet.thread.isRunning()):
            # short-cut return if window was closed and wallet is stopped
            return
        super().update()
    def on_update(self):
        """Rebuild the widget from the wallet's current UTXO set, preserving
        the user's selection across the rebuild."""
        prev_selection = self.get_selected() # cache previous selection, if any
        self.clear()
        self.wallet = self.parent.wallet
        if not self.wallet: return
        self.utxos = self.wallet.get_utxos()
        for x in self.utxos:
            address = x['address']
            address_text = address.to_ui_string()
            height = x['height']
            name = self.get_name(x)
            label = self.wallet.get_label(x['prevout_hash'])
            amount = self.parent.format_amount(x['value'])
            # Output point column shows an abbreviated "txid...:n" form.
            utxo_item = SortableTreeWidgetItem([address_text, label, amount,
                                                str(height),
                                                name[0:10] + '...' + name[-2:]])
            utxo_item.DataRole = Qt.UserRole+100 # set this here to avoid sorting based on Qt.UserRole+1
            utxo_item.setFont(0, QFont(MONOSPACE_FONT))
            utxo_item.setFont(4, QFont(MONOSPACE_FONT))
            utxo_item.setData(0, Qt.UserRole, name)
            a_frozen = self.wallet.is_frozen(address)
            c_frozen = x['is_frozen_coin']
            if a_frozen and not c_frozen:
                # address is frozen, coin is not frozen
                # emulate the look of the address_list.py frozen entry
                utxo_item.setBackground(0, QColor('lightblue'))
            elif c_frozen and not a_frozen:
                # coin is frozen, address is not frozen
                utxo_item.setBackground(0, ColorScheme.BLUE.as_color(True))
            elif c_frozen and a_frozen:
                # both coin and address are frozen so color-code it to indicate that.
                utxo_item.setBackground(0, QColor('lightblue'))
                utxo_item.setForeground(0, QColor('#3399ff'))
            # save the address-level-frozen and coin-level-frozen flags to the data item for retrieval later in create_menu() below.
            utxo_item.setData(0, Qt.UserRole+1, "{}{}".format(("a" if a_frozen else ""), ("c" if c_frozen else "")))
            self.addChild(utxo_item)
            if name in prev_selection:
                # NB: This needs to be here after the item is added to the widget. See #979.
                utxo_item.setSelected(True) # restore previous selection
    def get_selected(self):
        # Returns dict of "name" -> frozen flags string (eg: "ac")
        return { x.data(0, Qt.UserRole) : x.data(0, Qt.UserRole+1)
                for x in self.selectedItems() }
    def create_menu(self, position):
        """Build and show the right-click context menu for the current
        selection (spend, details, freeze/unfreeze coin and address)."""
        selected = self.get_selected()
        if not selected:
            return
        menu = QMenu()
        coins = filter(lambda x: self.get_name(x) in selected, self.utxos)
        # Spendable means neither the coin nor its address is frozen
        # (frozen-flags string is empty/falsy).
        spendable_coins = list(filter(lambda x: not selected.get(self.get_name(x), ''), coins))
        # Unconditionally add the "Spend" option but leave it disabled if there are no spendable_coins
        menu.addAction(_("Spend"), lambda: self.parent.spend_coins(spendable_coins)).setEnabled(bool(spendable_coins))
        if len(selected) == 1:
            # single selection, offer them the "Details" option and also coin/address "freeze" status, if any
            txid = list(selected.keys())[0].split(':')[0]
            frozen_flags = list(selected.values())[0]
            tx = self.wallet.transactions.get(txid)
            if tx:
                label = self.wallet.get_label(txid) or None
                menu.addAction(_("Details"), lambda: self.parent.show_transaction(tx, label))
            act = None
            needsep = True
            if 'c' in frozen_flags:
                menu.addSeparator()
                menu.addAction(_("Coin is frozen"), lambda: None).setEnabled(False)
                menu.addAction(_("Unfreeze Coin"), lambda: self.set_frozen_coins(list(selected.keys()), False))
                menu.addSeparator()
                needsep = False
            else:
                menu.addAction(_("Freeze Coin"), lambda: self.set_frozen_coins(list(selected.keys()), True))
            if 'a' in frozen_flags:
                if needsep: menu.addSeparator()
                menu.addAction(_("Address is frozen"), lambda: None).setEnabled(False)
                menu.addAction(_("Unfreeze Address"), lambda: self.set_frozen_addresses_for_coins(list(selected.keys()), False))
            else:
                menu.addAction(_("Freeze Address"), lambda: self.set_frozen_addresses_for_coins(list(selected.keys()), True))
        else:
            # multi-selection: offer bulk freeze/unfreeze actions only where
            # at least one selected item is in the opposite state.
            menu.addSeparator()
            if any(['c' not in flags for flags in selected.values()]):
                # they have some coin-level non-frozen in the selection, so add the menu action "Freeze coins"
                menu.addAction(_("Freeze Coins"), lambda: self.set_frozen_coins(list(selected.keys()), True))
            if any(['c' in flags for flags in selected.values()]):
                # they have some coin-level frozen in the selection, so add the menu action "Unfreeze coins"
                menu.addAction(_("Unfreeze Coins"), lambda: self.set_frozen_coins(list(selected.keys()), False))
            if any(['a' not in flags for flags in selected.values()]):
                # they have some address-level non-frozen in the selection, so add the menu action "Freeze addresses"
                menu.addAction(_("Freeze Addresses"), lambda: self.set_frozen_addresses_for_coins(list(selected.keys()), True))
            if any(['a' in flags for flags in selected.values()]):
                # they have some address-level frozen in the selection, so add the menu action "Unfreeze addresses"
                menu.addAction(_("Unfreeze Addresses"), lambda: self.set_frozen_addresses_for_coins(list(selected.keys()), False))
        menu.exec_(self.viewport().mapToGlobal(position))
    def on_permit_edit(self, item, column):
        # disable editing fields in this tab (labels)
        return False
    def set_frozen_coins(self, coins, b):
        # Delegate coin-level freeze state changes to the main window.
        if self.parent:
            self.parent.set_frozen_coin_state(coins, b)
    def set_frozen_addresses_for_coins(self, coins, b):
        """Freeze/unfreeze the addresses owning the coins named in ``coins``."""
        if not self.parent: return
        addrs = set()
        for utxo in self.utxos:
            name = self.get_name(utxo)
            if name in coins:
                addrs.add(utxo['address'])
        if addrs:
            self.parent.set_frozen_state(list(addrs), b)
    def update_labels(self):
        """Refresh only the Label column from the wallet, without a rebuild."""
        root = self.invisibleRootItem()
        child_count = root.childCount()
        for i in range(child_count):
            item = root.child(i)
            try:
                txid = item.data(0, Qt.UserRole).split(':', 1)[0]
            except IndexError:
                continue # name is invalid. should be txid:prevout_n
            label = self.wallet.get_label(txid)
            item.setText(1, label)
| 51.729885
| 138
| 0.627264
|
794c90c8c169cabde9aed5afa454f0aef4f237d4
| 4,568
|
py
|
Python
|
aioboto3/session.py
|
VeevaLabs/aioboto3
|
e760c336d4ef8c09df43e6bf919daf88ed1ef382
|
[
"Apache-2.0"
] | null | null | null |
aioboto3/session.py
|
VeevaLabs/aioboto3
|
e760c336d4ef8c09df43e6bf919daf88ed1ef382
|
[
"Apache-2.0"
] | null | null | null |
aioboto3/session.py
|
VeevaLabs/aioboto3
|
e760c336d4ef8c09df43e6bf919daf88ed1ef382
|
[
"Apache-2.0"
] | 1
|
2022-02-17T08:17:21.000Z
|
2022-02-17T08:17:21.000Z
|
# -*- coding: utf-8 -*-
"""
This class essentially overrides the boto3 session init, passing in
an async botocore session
"""
import aiobotocore.session
import boto3.session
import boto3.resources.base
import boto3.utils
from aioboto3.resources import AIOBoto3ResourceFactory
class Session(boto3.session.Session):
    """
    A session stores configuration state and allows you to create service
    clients and resources.
    :type aws_access_key_id: string
    :param aws_access_key_id: AWS access key ID
    :type aws_secret_access_key: string
    :param aws_secret_access_key: AWS secret access key
    :type aws_session_token: string
    :param aws_session_token: AWS temporary session token
    :type region_name: string
    :param region_name: Default region when creating new connections
    :type botocore_session: botocore.session.Session
    :param botocore_session: Use this Botocore session instead of creating
                             a new default one.
    :type profile_name: string
    :param profile_name: The name of a profile to use. If not given, then
                         the default profile is used.
    :param loop: Event loop forwarded to ``aiobotocore.session.get_session``
                 (only used when a new session is created).
    """
    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 aws_session_token=None, region_name=None,
                 botocore_session=None, profile_name=None, loop=None):
        # Mirrors boto3.session.Session.__init__, but builds the underlying
        # session with aiobotocore so clients/resources are async-capable.
        if botocore_session is not None:
            self._session = botocore_session
        else:
            # Create a new default session
            self._session = aiobotocore.session.get_session(loop=loop)
        # Setup custom user-agent string if it isn't already customized
        if self._session.user_agent_name == 'Botocore':
            botocore_info = 'Botocore/{0}'.format(
                self._session.user_agent_version)
            if self._session.user_agent_extra:
                self._session.user_agent_extra += ' ' + botocore_info
            else:
                self._session.user_agent_extra = botocore_info
            self._session.user_agent_name = 'Boto3'
            self._session.user_agent_version = boto3.__version__
        if profile_name is not None:
            self._session.set_config_variable('profile', profile_name)
        if aws_access_key_id or aws_secret_access_key or aws_session_token:
            self._session.set_credentials(
                aws_access_key_id, aws_secret_access_key, aws_session_token)
        if region_name is not None:
            self._session.set_config_variable('region', region_name)
        # Async-aware resource factory replaces boto3's default one.
        self.resource_factory = AIOBoto3ResourceFactory(
            self._session.get_component('event_emitter'))
        self._setup_loader()
        self._register_default_handlers()
    def _register_default_handlers(self):
        """Register service customizations on the botocore event emitter.
        S3 client injection is routed to aioboto3's async variant; the
        remaining handlers reuse boto3's stock lazy-loaded callbacks."""
        # S3 customizations
        self._session.register(
            'creating-client-class.s3',
            boto3.utils.lazy_call(
                'aioboto3.s3.inject.inject_s3_transfer_methods'))
        self._session.register(
            'creating-resource-class.s3.Bucket',
            boto3.utils.lazy_call(
                'boto3.s3.inject.inject_bucket_methods'))
        self._session.register(
            'creating-resource-class.s3.Object',
            boto3.utils.lazy_call(
                'boto3.s3.inject.inject_object_methods'))
        self._session.register(
            'creating-resource-class.s3.ObjectSummary',
            boto3.utils.lazy_call(
                'boto3.s3.inject.inject_object_summary_methods'))
        # DynamoDb customizations
        self._session.register(
            'creating-resource-class.dynamodb',
            boto3.utils.lazy_call(
                'boto3.dynamodb.transform.register_high_level_interface'),
            unique_id='high-level-dynamodb')
        self._session.register(
            'creating-resource-class.dynamodb.Table',
            boto3.utils.lazy_call(
                'aioboto3.dynamodb.table.register_table_methods'),
            unique_id='high-level-dynamodb-table')
        # EC2 Customizations
        self._session.register(
            'creating-resource-class.ec2.ServiceResource',
            boto3.utils.lazy_call(
                'boto3.ec2.createtags.inject_create_tags'))
        self._session.register(
            'creating-resource-class.ec2.Instance',
            boto3.utils.lazy_call(
                'boto3.ec2.deletetags.inject_delete_tags',
                event_emitter=self.events))
    def resource(self, *args, **kwargs):
        # Thin delegation to boto3's resource(); kept as an explicit override
        # point (the factory set in __init__ makes the result async-aware).
        result = super(Session, self).resource(*args, **kwargs)
        return result
| 38.066667
| 76
| 0.652802
|
794c918ff378092d6106f7600fb011636d2d708e
| 739
|
py
|
Python
|
dapr/actor/runtime/reentrancy_context.py
|
willtsai/python-sdk
|
7de59720cd30e02a5fa2a90fb43eb5bb93c0f63e
|
[
"Apache-2.0"
] | 125
|
2019-10-16T17:57:22.000Z
|
2022-03-08T09:16:01.000Z
|
dapr/actor/runtime/reentrancy_context.py
|
willtsai/python-sdk
|
7de59720cd30e02a5fa2a90fb43eb5bb93c0f63e
|
[
"Apache-2.0"
] | 319
|
2019-10-17T13:49:23.000Z
|
2022-03-31T19:32:53.000Z
|
dapr/actor/runtime/reentrancy_context.py
|
willtsai/python-sdk
|
7de59720cd30e02a5fa2a90fb43eb5bb93c0f63e
|
[
"Apache-2.0"
] | 69
|
2019-10-23T23:22:56.000Z
|
2022-03-16T13:27:17.000Z
|
# -*- coding: utf-8 -*-
"""
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Optional
from contextvars import ContextVar
# Context-local storage for the actor reentrancy identifier of the current
# logical call chain; None when no reentrant invocation is in progress.
# NOTE(review): exact contents of the string are set by the actor runtime
# elsewhere -- confirm against the callers before relying on its format.
reentrancy_ctx: ContextVar[Optional[str]] = ContextVar("reentrancy_ctx", default=None)
| 36.95
| 86
| 0.782138
|
794c91c289b862881ca111da6e84567f519adf61
| 687
|
py
|
Python
|
mysite/learn4/views.py
|
jasongwq/MySite
|
72b89f8c830554e9d31a7d0bb1a5fa715666c51c
|
[
"MIT"
] | null | null | null |
mysite/learn4/views.py
|
jasongwq/MySite
|
72b89f8c830554e9d31a7d0bb1a5fa715666c51c
|
[
"MIT"
] | null | null | null |
mysite/learn4/views.py
|
jasongwq/MySite
|
72b89f8c830554e9d31a7d0bb1a5fa715666c51c
|
[
"MIT"
] | null | null | null |
from django.http import HttpResponse
from django.shortcuts import render
try:
from django.http import JsonResponse
except ImportError:
from .tool import JsonResponse
def index(request):
    """Render the demo landing page template."""
    return render(request, 'index.html')
def add(request):
    """Return the sum of the integer GET parameters ``a`` and ``b`` as text.

    Raises KeyError if either parameter is missing and ValueError if a
    value is not a valid integer (same behaviour as the original).
    """
    first = int(request.GET['a'])
    second = int(request.GET['b'])
    return HttpResponse(str(first + second))
import json
def ajax_list(request):
    """Return the integers 0..99 as a JSON array response."""
    # range() is not JSON-serializable on Python 3 (json.dumps raises
    # TypeError); materialize it as a list first. On Python 2 this is a
    # no-op behaviour-wise, since range() already returned a list there.
    a = list(range(100))
    return HttpResponse(json.dumps(a), content_type='application/json')
def ajax_dict(request):
    """Return a fixed demo mapping of names to descriptions as JSON."""
    payload = {'twz': 'Love python and Django', 'zqxt': 'I am teaching Django'}
    return HttpResponse(json.dumps(payload), content_type='application/json')
| 23.689655
| 81
| 0.700146
|
794c91dddc1930072bbe133c1ec21e028db1bb02
| 1,865
|
py
|
Python
|
catalyst/dl/meters/classerrormeter.py
|
andrey-avdeev/catalyst
|
fd17aaba7775c99b7e2b1ce86e60aa8f2379acc3
|
[
"Apache-2.0"
] | 46
|
2020-03-27T20:12:32.000Z
|
2021-11-21T19:08:51.000Z
|
catalyst/dl/meters/classerrormeter.py
|
andrey-avdeev/catalyst
|
fd17aaba7775c99b7e2b1ce86e60aa8f2379acc3
|
[
"Apache-2.0"
] | 2
|
2020-04-06T10:43:04.000Z
|
2020-07-01T18:26:10.000Z
|
catalyst/dl/meters/classerrormeter.py
|
andrey-avdeev/catalyst
|
fd17aaba7775c99b7e2b1ce86e60aa8f2379acc3
|
[
"Apache-2.0"
] | 5
|
2020-04-17T14:09:53.000Z
|
2021-05-10T08:58:29.000Z
|
import numbers
import numpy as np
import torch
from . import meter
class ClassErrorMeter(meter.Meter):
    """Accumulates top-k classification error (or accuracy) over batches.

    Args:
        topk: iterable of k values to track; stored sorted. The default is
            a tuple rather than the original mutable ``[1]`` default, which
            was shared across all instances (classic mutable-default bug).
        accuracy: if True, ``value()`` reports accuracy in percent instead
            of error in percent.
    """
    def __init__(self, topk=(1,), accuracy=False):
        super(ClassErrorMeter, self).__init__()
        # np.sort copies, so callers' input is never mutated or aliased.
        self.topk = np.sort(topk)
        self.accuracy = accuracy
        self.reset()

    def reset(self):
        """Clear all accumulated counts."""
        self.sum = {v: 0 for v in self.topk}
        self.n = 0

    def add(self, output, target):
        """Accumulate statistics from one batch.

        Args:
            output: (N, C) class scores as a torch tensor or ndarray; a
                single 1-D score vector is treated as a batch of one.
            target: (N,) integer labels (tensor, ndarray, or a scalar).
        """
        if torch.is_tensor(output):
            output = output.cpu().squeeze().numpy()
        if torch.is_tensor(target):
            target = np.atleast_1d(target.cpu().squeeze().numpy())
        elif isinstance(target, numbers.Number):
            target = np.asarray([target])
        if np.ndim(output) == 1:
            output = output[np.newaxis]
        else:
            assert np.ndim(output) == 2, \
                "wrong output size (1D or 2D expected)"
            # Message fixed: this assert checks target rank, not matching.
            assert np.ndim(target) == 1, \
                "wrong target size (1D expected)"
            assert target.shape[0] == output.shape[0], \
                "target and output do not match"
        topk = self.topk
        maxk = int(topk[-1])  # seems like Python3 wants int and not np.int64
        no = output.shape[0]
        # Top-maxk predicted class indices per sample, best first.
        pred = torch.from_numpy(output).topk(maxk, 1, True, True)[1].numpy()
        # correct[i, j] is True when the (j+1)-th ranked prediction matches.
        correct = pred == target[:, np.newaxis].repeat(pred.shape[1], 1)
        for k in topk:
            # A sample counts as an error for k if none of its top-k hit.
            self.sum[k] += no - correct[:, 0:k].sum()
        self.n += no

    def value(self, k=-1):
        """Return the error (or accuracy) percentage for ``k``; with the
        default ``k=-1``, return a list of values for every tracked k."""
        if k != -1:
            assert k in self.sum.keys(), \
                "invalid k (this k was not provided at construction time)"
            if self.accuracy:
                return (1. - float(self.sum[k]) / self.n) * 100.0
            else:
                return float(self.sum[k]) / self.n * 100.0
        else:
            return [self.value(k_) for k_ in self.topk]
| 32.155172
| 77
| 0.541019
|
794c92127a19f7086fcea770597ffd303456dbc2
| 288
|
py
|
Python
|
pfa/postfix/forms.py
|
fretscha/pfa
|
88de7994779d893cef8e762111d2789bfde96095
|
[
"BSD-3-Clause"
] | 1
|
2015-01-24T13:29:58.000Z
|
2015-01-24T13:29:58.000Z
|
pfa/postfix/forms.py
|
fretscha/pfa
|
88de7994779d893cef8e762111d2789bfde96095
|
[
"BSD-3-Clause"
] | null | null | null |
pfa/postfix/forms.py
|
fretscha/pfa
|
88de7994779d893cef8e762111d2789bfde96095
|
[
"BSD-3-Clause"
] | 3
|
2015-01-25T19:48:31.000Z
|
2021-01-08T20:49:50.000Z
|
from __future__ import absolute_import
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, ButtonHolder, Submit
from . import models
class DomainForm(forms.ModelForm):
    """Model form exposing the ``domain`` and ``mailboxes`` fields."""
    class Meta:  # was misspelled ``Mata``, so Django silently ignored it
        # NOTE(review): ModelForm requires ``model``; ``models.Domain`` is
        # inferred from the form's name -- verify against the app's models.
        model = models.Domain
        fields = ('domain', 'mailboxes',)
| 19.2
| 60
| 0.767361
|
794c92e0bbaf6d4c072a29634eb08f0a347c1b1b
| 3,523
|
py
|
Python
|
proj/shared_settings.py
|
NBISweden/web_scampi2
|
dbe70421109fdb5e556d014d56bbe0e540476461
|
[
"MIT"
] | 1
|
2021-03-25T13:12:17.000Z
|
2021-03-25T13:12:17.000Z
|
proj/shared_settings.py
|
NBISweden/web_scampi2
|
dbe70421109fdb5e556d014d56bbe0e540476461
|
[
"MIT"
] | 14
|
2019-04-24T09:10:56.000Z
|
2022-03-29T08:12:17.000Z
|
proj/shared_settings.py
|
nanjiangshu/web_boctopus2
|
8c925db93568f1419b57397027a083213024f3c1
|
[
"MIT"
] | 1
|
2018-09-17T19:33:57.000Z
|
2018-09-17T19:33:57.000Z
|
"""
Shared settings
Django settings for the project 'proj'
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import logging
# BASE_DIR is the directory holding this settings module; PARENT_DIR is one
# level up and holds the SQLite database file.
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
PARENT_DIR = os.path.realpath("%s/../"%(BASE_DIR))
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'proj.pred',
)
MIDDLEWARE = (
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'proj.urls'
WSGI_APPLICATION = 'proj.wsgi.application'
# After login/logout users are routed to the prediction app.
LOGIN_REDIRECT_URL = '/pred'
LOGOUT_REDIRECT_URL = '/pred/login'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(PARENT_DIR, 'db.sqlite3'),
    },
}
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(BASE_DIR, 'pred', 'templates'),
            os.path.join(BASE_DIR, 'pred', 'static'),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            # NOTE(review): template debug is hardcoded on even though these
            # are *shared* settings -- confirm this is intended in production.
            'debug': True,
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# LOGGING configuration
# NOTE(review): the debug log is written inside the static tree, making it
# publicly servable -- confirm that this is intentional.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'file': {
            'level': 'DEBUG',
            'class': 'logging.FileHandler',
            'filename': "%s/%s/%s/%s/debug.log"%(BASE_DIR,"pred", "static", "log"),
        },
    },
    'loggers': {
        'django.request': {
            'handlers': ['file'],
            'level': 'DEBUG',
            'propagate': True,
        },
    },
}
logging.basicConfig(level=logging.INFO)
logging.getLogger('spyne.protocol.xml').setLevel(logging.DEBUG)
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = "%s/pred/static"%(BASE_DIR)
# Usernames granted superuser-level behaviour by the application code.
SUPER_USER_LIST = ["admin","nanjiang", "njshu"]
| 25.904412
| 91
| 0.643486
|
794c93102c01c96f739704ed941805e5dcd6d5d9
| 769
|
py
|
Python
|
authorityControl/postOverwrite.py
|
cul/archivesspace
|
9c088d4495cf1006c7d02ed2850224a9d28b35c1
|
[
"MIT"
] | 4
|
2018-12-13T16:18:30.000Z
|
2020-02-14T14:01:28.000Z
|
authorityControl/postOverwrite.py
|
cul/archivesspace
|
9c088d4495cf1006c7d02ed2850224a9d28b35c1
|
[
"MIT"
] | null | null | null |
authorityControl/postOverwrite.py
|
cul/archivesspace
|
9c088d4495cf1006c7d02ed2850224a9d28b35c1
|
[
"MIT"
] | 2
|
2019-09-03T19:15:24.000Z
|
2020-12-01T20:27:14.000Z
|
# Python 2 script (uses print statements): POSTs every record in corps.json
# back to an ArchivesSpace instance, overwriting the stored records.
import json
import requests
import secrets  # NOTE: local secrets.py (baseURL/user/password) -- shadows the stdlib ``secrets`` module
import time
startTime = time.time()
baseURL = secrets.baseURL
user = secrets.user
password = secrets.password
# Authenticate once and reuse the returned session token for all requests.
auth = requests.post(baseURL + '/users/'+user+'/login?password='+password).json()
session = auth["session"]
headers = {'X-ArchivesSpace-Session':session, 'Content_Type':'application/json'}
#records = json.load(open('[JSON File]'))
records = json.load(open('corps.json'))
# Each record carries its own 'uri'; POSTing the serialized record to that
# URI overwrites the server-side copy.
for i in range (0, len (records)):
    record = json.dumps(records[i])
    uri = records[i]['uri']
    post = requests.post(baseURL + uri, headers=headers, data=record).json()
    print post
# Report total elapsed wall-clock time as H:MM:SS.
elapsedTime = time.time() - startTime
m, s = divmod(elapsedTime, 60)
h, m = divmod(m, 60)
print 'Total script run time: ', '%d:%02d:%02d' % (h, m, s)
| 28.481481
| 81
| 0.689207
|
794c9311a3a5835c9f28c3df4a64cbc9188b4721
| 1,300
|
py
|
Python
|
src/OTLMOW/OTLModel/Datatypes/KlVormSchermelement.py
|
davidvlaminck/OTLClassPython
|
71330afeb37c3ea6d9981f521ff8f4a3f8b946fc
|
[
"MIT"
] | 2
|
2022-02-01T08:58:11.000Z
|
2022-02-08T13:35:17.000Z
|
src/OTLMOW/OTLModel/Datatypes/KlVormSchermelement.py
|
davidvlaminck/OTLMOW
|
71330afeb37c3ea6d9981f521ff8f4a3f8b946fc
|
[
"MIT"
] | null | null | null |
src/OTLMOW/OTLModel/Datatypes/KlVormSchermelement.py
|
davidvlaminck/OTLMOW
|
71330afeb37c3ea6d9981f521ff8f4a3f8b946fc
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from OTLMOW.OTLModel.Datatypes.KeuzelijstField import KeuzelijstField
from OTLMOW.OTLModel.Datatypes.KeuzelijstWaarde import KeuzelijstWaarde
# Generated with OTLEnumerationCreator. To modify: extend, do not edit
class KlVormSchermelement(KeuzelijstField):
    """Choice list indicating whether the screen element is straight
    (recht) or curved (gebogen)."""
    # Generated code: all metadata values below (names, definitions, URIs)
    # come from the source code list and must stay byte-identical.
    naam = 'KlVormSchermelement'
    label = 'Vorm schermelement'
    objectUri = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#KlVormSchermelement'
    definition = 'Deze keuzelijst geeft aan of het schermelement recht of gebogen is.'
    codelist = 'https://wegenenverkeer.data.vlaanderen.be/id/conceptscheme/KlVormSchermelement'
    options = {
        'gebogen': KeuzelijstWaarde(invulwaarde='gebogen',
                                    label='gebogen',
                                    definitie='gebogen',
                                    objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVormSchermelement/gebogen'),
        'recht': KeuzelijstWaarde(invulwaarde='recht',
                                  label='recht',
                                  definitie='recht',
                                  objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVormSchermelement/recht')
    }
| 52
| 130
| 0.65
|
794c93168fdeaf93fe0a2ca41c2c6bc71f0794e2
| 11,129
|
py
|
Python
|
core/storage/story/gae_models.py
|
apoorva-saxena-test/oppia
|
f9e1e8d4f2f9f48efef121a720459f98ecb08ddf
|
[
"Apache-2.0"
] | null | null | null |
core/storage/story/gae_models.py
|
apoorva-saxena-test/oppia
|
f9e1e8d4f2f9f48efef121a720459f98ecb08ddf
|
[
"Apache-2.0"
] | null | null | null |
core/storage/story/gae_models.py
|
apoorva-saxena-test/oppia
|
f9e1e8d4f2f9f48efef121a720459f98ecb08ddf
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models for storing the story data models."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from constants import constants
from core.platform import models
from google.appengine.ext import ndb
(base_models, user_models,) = models.Registry.import_models([
models.NAMES.base_model, models.NAMES.user])
class StorySnapshotMetadataModel(base_models.BaseSnapshotMetadataModel):
    """Storage model for the metadata for a story snapshot.

    Inherits everything from the base snapshot-metadata model; the docstring
    itself is a sufficient class body, so no ``pass`` is needed.
    """
class StorySnapshotContentModel(base_models.BaseSnapshotContentModel):
    """Storage model for the content of a story snapshot.

    Inherits everything from the base snapshot-content model; the docstring
    itself is a sufficient class body, so no ``pass`` is needed.
    """
class StoryModel(base_models.VersionedModel):
"""Model for storing stories.
This class should only be imported by the story services file
and the story model test file.
"""
SNAPSHOT_METADATA_CLASS = StorySnapshotMetadataModel
SNAPSHOT_CONTENT_CLASS = StorySnapshotContentModel
ALLOW_REVERT = False
# The title of the story.
title = ndb.StringProperty(required=True, indexed=True)
# The thumbnail filename of the story.
thumbnail_filename = ndb.StringProperty(indexed=True)
# The thumbnail background color of the story.
thumbnail_bg_color = ndb.StringProperty(indexed=True)
# A high-level description of the story.
description = ndb.StringProperty(indexed=False)
# A set of notes, that describe the characters, main storyline, and setting.
notes = ndb.TextProperty(indexed=False)
# The ISO 639-1 code for the language this story is written in.
language_code = ndb.StringProperty(required=True, indexed=True)
# The story contents dict specifying the list of story nodes and the
# connection between them. Modelled by class StoryContents
# (see story_domain.py for its current schema).
story_contents = ndb.JsonProperty(default={}, indexed=False)
# The schema version for the story_contents.
story_contents_schema_version = (
ndb.IntegerProperty(required=True, indexed=True))
# The topic id to which the story belongs.
corresponding_topic_id = ndb.StringProperty(indexed=True, required=True)
# The url fragment for the story.
url_fragment = ndb.StringProperty(required=True, indexed=True)
# The content of the meta tag in the Story viewer page.
meta_tag_content = ndb.StringProperty(indexed=True)
@staticmethod
def get_deletion_policy():
"""Story should be kept if the corresponding topic is published."""
return base_models.DELETION_POLICY.KEEP_IF_PUBLIC
@classmethod
def has_reference_to_user_id(cls, user_id):
"""Check whether StoryModel snapshots references the given user.
Args:
user_id: str. The ID of the user whose data should be checked.
Returns:
bool. Whether any models refer to the given user ID.
"""
return cls.SNAPSHOT_METADATA_CLASS.exists_for_user_id(user_id)
def _trusted_commit(
self, committer_id, commit_type, commit_message, commit_cmds):
"""Record the event to the commit log after the model commit.
Note that this extends the superclass method.
Args:
committer_id: str. The user_id of the user who committed the
change.
commit_type: str. The type of commit. Possible values are in
core.storage.base_models.COMMIT_TYPE_CHOICES.
commit_message: str. The commit description message.
commit_cmds: list(dict). A list of commands, describing changes
made in this model, which should give sufficient information to
reconstruct the commit. Each dict always contains:
cmd: str. Unique command.
and then additional arguments for that command.
"""
super(StoryModel, self)._trusted_commit(
committer_id, commit_type, commit_message, commit_cmds)
story_commit_log_entry = StoryCommitLogEntryModel.create(
self.id, self.version, committer_id, commit_type, commit_message,
commit_cmds, constants.ACTIVITY_STATUS_PUBLIC, False
)
story_commit_log_entry.story_id = self.id
story_commit_log_entry.put()
@classmethod
def get_export_policy(cls):
"""Model does not contain user data."""
return dict(super(cls, cls).get_export_policy(), **{
'title': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'thumbnail_filename': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'thumbnail_bg_color': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'description': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'notes': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'language_code': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'story_contents': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'story_contents_schema_version':
base_models.EXPORT_POLICY.NOT_APPLICABLE,
'corresponding_topic_id': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'url_fragment': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'meta_tag_content': base_models.EXPORT_POLICY.NOT_APPLICABLE
})
    @classmethod
    def get_by_url_fragment(cls, url_fragment):
        """Gets StoryModel by url_fragment. Returns None if the story with
        name url_fragment doesn't exist.
        Args:
            url_fragment: str. The url fragment of the story.
        Returns:
            StoryModel|None. The story model of the story or None if not
            found.
        """
        # NDB property filters require an explicit `== False` comparison
        # (rather than the Pythonic `not x`) to match the stored boolean,
        # hence the singleton-comparison pylint suppression.
        return StoryModel.query().filter(
            cls.url_fragment == url_fragment).filter(
                cls.deleted == False).get() # pylint: disable=singleton-comparison
class StoryCommitLogEntryModel(base_models.BaseCommitLogEntryModel):
    """Log of commits to stories.
    A new instance of this model is created and saved every time a commit to
    StoryModel occurs.
    The id for this model is of the form 'story-[story_id]-[version]'.
    """
    # The id of the story being edited.
    story_id = ndb.StringProperty(indexed=True, required=True)
    @staticmethod
    def get_deletion_policy():
        """Story commit log is deleted only if the corresponding story
        is not public.
        NOTE(review): the original docstring said "collection" here —
        presumably a copy-paste from the collection commit log model; the
        policy itself is unchanged.
        """
        return base_models.DELETION_POLICY.KEEP_IF_PUBLIC
    @classmethod
    def _get_instance_id(cls, story_id, version):
        """This function returns the generated id for the get_commit function
        in the parent class.
        Args:
            story_id: str. The id of the story being edited.
            version: int. The version number of the story after the commit.
        Returns:
            str. The commit id with the story id and version number.
        """
        return 'story-%s-%s' % (story_id, version)
    @classmethod
    def get_export_policy(cls):
        """This model is only stored for archive purposes. The commit log of
        entities is not related to personal user data.
        """
        return dict(super(cls, cls).get_export_policy(), **{
            'story_id': base_models.EXPORT_POLICY.NOT_APPLICABLE
        })
class StorySummaryModel(base_models.BaseModel):
    """Summary model for an Oppia Story.
    This should be used whenever the content blob of the story is not
    needed (e.g. search results, etc).
    A StorySummaryModel instance stores the following information:
        id, description, language_code, last_updated, created_on, version.
    The key of each instance is the story id.
    """
    # The title of the story.
    title = ndb.StringProperty(required=True, indexed=True)
    # The ISO 639-1 code for the language this story is written in.
    language_code = ndb.StringProperty(required=True, indexed=True)
    # A high-level description of the story.
    description = ndb.StringProperty(required=True, indexed=True)
    # Time when the story model was last updated (not to be
    # confused with last_updated, which is the time when the
    # story *summary* model was last updated).
    story_model_last_updated = ndb.DateTimeProperty(required=True, indexed=True)
    # Time when the story model was created (not to be confused
    # with created_on, which is the time when the story *summary*
    # model was created).
    story_model_created_on = ndb.DateTimeProperty(required=True, indexed=True)
    # The titles of the nodes in the story, in the same order as present there.
    node_titles = ndb.StringProperty(repeated=True, indexed=False)
    # The thumbnail filename of the story.
    thumbnail_filename = ndb.StringProperty(indexed=True)
    # The thumbnail background color of the story.
    thumbnail_bg_color = ndb.StringProperty(indexed=True)
    # The version of the story model this summary mirrors.
    version = ndb.IntegerProperty(required=True)
    # The url fragment for the story.
    url_fragment = ndb.StringProperty(required=True, indexed=True)
    @staticmethod
    def get_deletion_policy():
        """Story summary should be kept if the corresponding topic is
        published.
        """
        return base_models.DELETION_POLICY.KEEP_IF_PUBLIC
    @classmethod
    def has_reference_to_user_id(cls, unused_user_id):
        """Check whether StorySummaryModel references the given user.
        Args:
            unused_user_id: str. The (unused) ID of the user whose data should
                be checked.
        Returns:
            bool. Whether any models refer to the given user ID.
        """
        return False
    @classmethod
    def get_export_policy(cls):
        """Model does not contain user data."""
        return dict(super(cls, cls).get_export_policy(), **{
            'title': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'language_code': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'description': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'story_model_last_updated':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'story_model_created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'node_titles': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'thumbnail_filename': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'thumbnail_bg_color': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'version': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'url_fragment': base_models.EXPORT_POLICY.NOT_APPLICABLE
        })
| 40.616788
| 82
| 0.696559
|
794c950d26b3939ef246cd4b151bb2643194601f
| 15,158
|
py
|
Python
|
Packs/FireEye-Detection-on-Demand/Integrations/FireEye-Detection-on-Demand/FireEye-Detection-on-Demand.py
|
mckibbenc/content
|
2a54b699ea8b2f73afd48393c5104be258935b5d
|
[
"MIT"
] | null | null | null |
Packs/FireEye-Detection-on-Demand/Integrations/FireEye-Detection-on-Demand/FireEye-Detection-on-Demand.py
|
mckibbenc/content
|
2a54b699ea8b2f73afd48393c5104be258935b5d
|
[
"MIT"
] | null | null | null |
Packs/FireEye-Detection-on-Demand/Integrations/FireEye-Detection-on-Demand/FireEye-Detection-on-Demand.py
|
mckibbenc/content
|
2a54b699ea8b2f73afd48393c5104be258935b5d
|
[
"MIT"
] | null | null | null |
from typing import Any, Dict, List, Optional, Tuple
import dateparser
import demistomock as demisto
import requests
from CommonServerPython import * # noqa: F401
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' CONSTANTS '''
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'  # ISO 8601 timestamp in UTC ("Zulu") form
''' CLIENT CLASS '''
class Client(BaseClient):
    """Client class to interact with the service API
    This Client implements API calls, and does not contain any Demisto logic.
    Should only do requests and return data.
    It inherits from BaseClient defined in CommonServer Python.
    Most calls use _http_request() that handles proxy, SSL verification, etc.
    For this HelloWorld implementation, no special attributes defined
    """
    def get_file_reputation(self, file: str) -> Dict[str, Any]:
        """Fetch reputation data for a single file hash via GET /hashes/{hash}."""
        return self._http_request(
            method='GET',
            url_suffix=f'/hashes/{file}'
        )
    def get_health(self) -> Dict[str, Any]:
        """Query the service health endpoint (used by the test-module check)."""
        return self._http_request(
            method='GET',
            url_suffix='/health'
        )
    def submit_file(self, files: Dict[str, Any], data: Dict[str, Any]) -> Dict[str, Any]:
        """Upload a file for detonation.
        Args:
            files: Multipart file payload ({'file': (name, fileobj)}).
            data: Optional form fields (password, screenshot, video, ...).
        """
        return self._http_request(
            method='POST',
            url_suffix='/files',
            files=files,
            data=data
        )
    def submit_urls(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """Submit a serialized list of URLs for analysis.
        NOTE(review): the payload is deliberately passed via ``files=`` so it
        is encoded as multipart/form-data — presumably what the API expects;
        confirm against the DoD API docs before "fixing" this.
        """
        return self._http_request(
            method='POST',
            url_suffix='/urls',
            files=data,
            data=None
        )
    def get_report_url(self, report_id: str, expiration: int) -> Dict[str, Any]:
        """Request a pre-signed report URL valid for `expiration` hours."""
        return self._http_request(
            method='GET',
            url_suffix=f'/presigned-url/{report_id}',
            params={
                'expiry': expiration
            }
        )
    def report_status(self, report_id: str, extended: str) -> Dict[str, Any]:
        """Fetch the status/contents of a report; `extended` is "True"/"False"."""
        return self._http_request(
            method='GET',
            url_suffix=f'/reports/{report_id}',
            params={
                'extended': extended
            }
        )
    def report_artifact(self, report_id: str, artifact_type: str) -> Dict[str, Any]:
        """Download a raw report artifact (e.g. screenshot); returns bytes
        because of resp_type='content'.
        """
        return self._http_request(
            method='GET',
            url_suffix=f'/artifacts/{report_id}',
            params={
                'type': artifact_type,
            },
            resp_type='content'
        )
''' HELPER FUNCTIONS '''
def convert_to_demisto_severity(severity: str) -> int:
    """Map a severity name to the corresponding Cortex XSOAR severity level.

    In this case the mapping is straightforward, but more complex mappings
    might be required in other integrations, so a dedicated function is used.

    Args:
        severity: One of 'Low', 'Medium', 'High' or 'Critical'.

    Returns:
        int. The XSOAR severity (1=low, 2=medium, 3=high, 4=critical).

    Raises:
        KeyError: If `severity` is not one of the four known names.
    """
    severity_map = {
        'Low': 1,
        'Medium': 2,
        'High': 3,
        'Critical': 4,
    }
    return severity_map[severity]
def arg_to_int(arg: Any, arg_name: str, required: bool = False) -> Optional[int]:
    """Convert a command argument to an int, or None if absent.

    Args:
        arg: The raw argument value (str, int or None).
        arg_name: Name of the argument, used in error messages.
        required: If True, a missing (None) argument raises instead of
            returning None.

    Returns:
        int or None. The converted value.

    Raises:
        ValueError: If the argument is missing but required, or is not a
            valid integer.
    """
    if arg is None:
        if required is True:
            raise ValueError(f'Missing "{arg_name}"')
        return None
    if isinstance(arg, str):
        try:
            # int() accepts an optional sign and surrounding whitespace,
            # generalizing the previous digits-only check which wrongly
            # rejected values like "-3".
            return int(arg)
        except ValueError:
            raise ValueError(f'Invalid number: "{arg_name}"="{arg}"')
    if isinstance(arg, int):
        return arg
    raise ValueError(f'Invalid number: "{arg_name}"')
def arg_to_timestamp(arg: Any, arg_name: str, required: bool = False) -> Optional[int]:
    """Convert an argument to a Unix epoch timestamp (in whole seconds).

    Accepts an epoch given as int/float/digit-string, or a human-readable
    date string (ISO 8601 or relative, e.g. "3 days") parsed via dateparser
    in UTC.

    Args:
        arg: The raw argument value.
        arg_name: Name of the argument, used in error messages.
        required: If True, a missing (None) argument raises.

    Returns:
        int or None. Epoch seconds, or None when absent and not required.

    Raises:
        ValueError: On a missing-but-required, unparseable, or wrongly
            typed argument.
    """
    if arg is None:
        if required is True:
            raise ValueError(f'Missing "{arg_name}"')
        return None
    if isinstance(arg, (int, float)):
        # Already numeric — truncate floats to whole seconds.
        return int(arg)
    if isinstance(arg, str):
        if arg.isdigit():
            # A plain digit string is taken as an epoch value directly.
            return int(arg)
        parsed = dateparser.parse(arg, settings={'TIMEZONE': 'UTC'})
        if parsed is None:
            # dateparser could not make sense of the string.
            raise ValueError(f'Invalid date: {arg_name}')
        return int(parsed.timestamp())
    raise ValueError(f'Invalid date: "{arg_name}"')
''' COMMAND FUNCTIONS '''
def test_module(client: Client) -> str:
    """Verify connectivity and authentication via the health endpoint.

    Returns:
        str. 'ok' on success, or a human-readable auth error message.
    """
    # INTEGRATION DEVELOPER TIP
    # Client class should raise the exceptions, but if the test fails
    # the exception text is printed to the Cortex XSOAR UI.
    # If you have some specific errors you want to capture (i.e. auth failure)
    # you should catch the exception here and return a string with a more
    # readable output (for example return 'Authentication Error, API Key
    # invalid').
    # Cortex XSOAR will print everything you return different than 'ok' as
    # an error
    try:
        #
        client.get_health()
    except DemistoException as e:
        if 'Forbidden' in str(e):
            return 'Authorization Error: make sure API Key is correctly set'
        else:
            raise e
    return 'ok'
def get_hashes_command(client: Client, args: Dict[str, Any]) -> Tuple[str, dict, Any]:
    """Look up reputation for one or more MD5 hashes and build DBotScore,
    standard File, and FireEyeDoD context entries.

    Args:
        client: Client. The configured FireEye DoD client.
        args: dict. Command arguments; expects 'md5_hashes' (CSV or list).

    Returns:
        tuple(str, dict, Any). (markdown table, context outputs, raw data).

    Raises:
        ValueError: If no hashes were supplied.
        Exception: If a supplied hash is not a valid MD5.
    """
    hashes = argToList(args.get('md5_hashes'))
    if len(hashes) == 0:
        raise ValueError('hash(es) not specified')
    # Validate every hash up front; only MD5 lookups are supported.
    for hash in hashes:
        if md5Regex.match(hash):
            continue
        raise Exception('Invalid hash. Only MD5 is supported.')
    dbot_score_list: List[Dict[str, Any]] = []
    file_standard_list: List[Dict[str, Any]] = []
    file_data_list: List[Dict[str, Any]] = []
    for hash in hashes:
        file_data = client.get_file_reputation(hash)
        # Rename the API's lowercase 'md5' key to the XSOAR-standard 'MD5'.
        file_data['MD5'] = file_data['md5']
        del file_data['md5']
        # demisto.results(file_data)
        engines = file_data.get('engine_results', {})
        # Hoist the first SHA256 any engine reported up to the top level.
        for key in engines.keys():
            if engines[key].get('sha256'):
                file_data['SHA256'] = engines[key].get('sha256')
                del engines[key]['sha256']
        # If the outer `is_malicious` is set to True, assume the score should be bad
        # Otherwise, default to unknown unless at least one engine has returned a verdict besides `not_found`
        if file_data['is_malicious']:
            score = 3  # bad
        else:
            score = 0  # unknown
            # NOTE(review): a verdict of "malicious" here (with the outer
            # flag False) leaves the score at unknown rather than bad —
            # presumably intentional since is_malicious is authoritative;
            # confirm.
            for key in engines.keys():
                verdict = engines[key].get('verdict', 'not_found')
                if verdict != "not_found" and verdict != "malicious":
                    score = 1  # good
                    break
        dbot_score = {
            'Indicator': hash,
            'Vendor': 'FireEye DoD',
            'Type': 'file',
            'Score': score
        }
        file_standard_context = {
            'MD5': hash,
        }
        if score == 3:
            # if score is bad must add DBotScore Vendor and Description
            file_standard_context['Malicious'] = {
                'Vendor': 'FireEye DoD'
            }
        filedata = {}
        filedata['FireEyeDoD'] = file_data
        # Keep MD5/SHA256 at the top level of the File entry and out of the
        # vendor-specific FireEyeDoD sub-dict.
        filedata['MD5'] = file_data['MD5']
        del filedata['FireEyeDoD']['MD5']
        if file_data.get('SHA256'):
            # Emit a second DBotScore entry keyed on the SHA256 as well.
            dbot_score_sha256 = {
                'Indicator': file_data.get('SHA256'),
                'Vendor': 'FireEye DoD',
                'Type': 'file',
                'Score': score
            }
            dbot_score_list.append(dbot_score_sha256)
            filedata['SHA256'] = file_data['SHA256']
            file_standard_context['SHA256'] = file_data['SHA256']
            del filedata['FireEyeDoD']['SHA256']
        file_standard_list.append(file_standard_context)
        dbot_score_list.append(dbot_score)
        file_data_list.append(filedata)
    outputs = {
        'DBotScore(val.Vendor == obj.Vendor && val.Indicator == obj.Indicator)': dbot_score_list,
        outputPaths['file']: file_standard_list,
        'File(val.MD5 == obj.MD5 || val.SHA256 == obj.SHA256)': file_data_list
    }
    readable_output = tableToMarkdown('FireEye DoD Results', file_standard_list, headers=["MD5", "SHA256", "Malicious"])
    return (
        readable_output,
        outputs,
        file_data_list
    )
def generate_report_url(client: Client, args: Dict[str, Any]) -> Tuple[str, dict, dict]:
    """Generate a pre-signed URL for viewing a report.

    Args:
        client: Client. The configured FireEye DoD client.
        args: dict. Expects 'report_id' and 'expiration' (hours, 1-8760).

    Returns:
        tuple(str, dict, dict). (markdown link, empty context, raw response).

    Raises:
        ValueError: If the expiration is missing, zero, or out of range.
    """
    report_id = str(args.get('report_id'))
    expiration = arg_to_int(arg=args.get('expiration'), arg_name='expiration', required=True)

    # Guard clauses: a falsy value means it was never supplied; anything
    # outside 1-8760 hours is rejected by the API.
    if not expiration:
        raise ValueError('Expiration not specified or not a number.')
    if expiration < 1 or expiration > 8760:
        raise ValueError('Expiration must be between 1 and 8760 hours.')

    report = client.get_report_url(report_id=report_id, expiration=expiration)
    presigned_report_url = report.get('presigned_report_url')
    readable_output = f'Report {report_id} is available [here]({presigned_report_url})'
    return readable_output, {}, report
def submit_file_command(client: Client, args: Dict[str, Any]) -> Tuple[str, dict, dict]:
    """Submit a war-room file to FireEye DoD for analysis.

    Args:
        client: Client. The configured FireEye DoD client.
        args: dict. Unused directly; 'entryID' and optional detonation
            parameters are read from demisto.args().

    Returns:
        tuple(str, dict, dict). (readable output, context outputs, raw scan).
    """
    entry_id = demisto.args().get('entryID')
    file_entry = demisto.getFilePath(entry_id)  # .get('path')
    file_name = file_entry['name']
    file_path = file_entry['path']

    # Optional parameters to send along with the file.
    optional_params = ['password', 'param', 'screenshot', 'video', 'fileExtraction', 'memoryDump', 'pcap']
    data = {}
    for param in optional_params:
        value = demisto.args().get(param)
        if value:
            data[param] = value

    # Use a context manager so the file handle is always closed; the
    # previous implementation leaked the open handle.
    with open(file_path, 'rb') as file_handle:
        files = {'file': (file_name, file_handle)}
        scan = client.submit_file(files=files, data=data)

    scan['filename'] = file_name
    # The API's transient 'status' field is replaced by our own
    # overall_status, which later polling updates.
    del scan['status']
    scan['overall_status'] = 'RUNNING'
    report_id = scan.get('report_id')
    readable_output = (
        f'Started analysis of {file_name} with FireEye Detection on Demand.'
        f'Results will be published to report id: {report_id}'
    )
    outputs = {
        'FireEyeDoD.Scan(val.report_id == obj.report_id)': scan
    }
    return (
        readable_output,
        outputs,
        scan
    )
def submit_urls_command(client: Client, args: Dict[str, Any]) -> Tuple[str, dict, dict]:
    """Submit one or more URLs to FireEye DoD for analysis.

    Args:
        client: Client. The configured FireEye DoD client.
        args: dict. Expects 'urls' (CSV or list).

    Returns:
        tuple(str, dict, dict). (readable output, context outputs, raw scan).

    Raises:
        ValueError: If no URLs were supplied.
    """
    urls = argToList(args.get('urls'))
    if len(urls) == 0:
        raise ValueError('hash(es) not specified')

    # The API expects the URL list serialized as a single bracketed string,
    # e.g. '["http://a","http://b"]'.
    quoted_urls = ','.join(f'"{url}"' for url in urls)
    data = {'urls': '[' + quoted_urls + ']'}

    scan = client.submit_urls(data=data)
    # Replace the API's transient 'status' field with our overall_status.
    del scan['status']
    scan['overall_status'] = 'RUNNING'
    report_id = scan.get('report_id')
    readable_output = (
        f'Started analysis of {urls} with FireEye Detection on Demand.'
        f'Results will be published to report id: {report_id}'
    )
    outputs = {
        'FireEyeDoD.Scan(val.report_id == obj.report_id)': scan
    }
    return readable_output, outputs, scan
def get_reports_command(client: Client, args: Dict[str, Any]) -> Tuple[str, dict, Any]:
    """Fetch status (and optionally screenshot/artifact files) for reports.

    Args:
        client: Client. The configured FireEye DoD client.
        args: dict. Expects 'report_ids'; optional 'extended_report',
            'get_screenshot' and 'get_artifact'.

    Returns:
        tuple(str, dict, Any). (markdown table, context outputs, raw list).

    Raises:
        ValueError: If no report ids were supplied.
    """
    report_id_list = argToList(args.get('report_ids', []))
    extended = args.get('extended_report', "False")
    screenshot = args.get('get_screenshot', "false")
    artifact = args.get('get_artifact', "")
    if len(report_id_list) == 0:
        raise ValueError('report_id(s) not specified')
    report_list: List[Dict[str, Any]] = []
    for report_id in report_id_list:
        report = client.report_status(report_id=report_id, extended=extended)
        if screenshot.lower() == "true":
            # NOTE(review): `screenshot` is re-bound here to the artifact
            # bytes, so with multiple report ids only the FIRST report's
            # screenshot is fetched (the next `.lower() == "true"` check
            # fails) — looks unintentional; confirm.
            screenshot = client.report_artifact(report_id=report_id, artifact_type="screenshot")
            stored_img = fileResult('screenshot.gif', screenshot)
            # Publish the screenshot directly to the war room as an image.
            demisto.results({'Type': entryTypes['image'], 'ContentsFormat': formats['text'],
                             'File': stored_img['File'], 'FileID': stored_img['FileID'], 'Contents': ''})
        if artifact != "":
            artifacts = client.report_artifact(report_id=report_id, artifact_type=artifact)
            stored_artifacts = fileResult('artifacts.zip', artifacts)
            # Publish the raw artifact bundle to the war room as a file.
            demisto.results({'Type': entryTypes['file'], 'ContentsFormat': formats['text'],
                             'File': stored_artifacts['File'], 'FileID': stored_artifacts['FileID'], 'Contents': ''})
        report_list.append(report)
    readable_output = tableToMarkdown('Scan status', report_list)
    outputs = {
        'FireEyeDoD.Scan(val.report_id == obj.report_id)': report_list
    }
    return (
        readable_output,
        outputs,
        report_list
    )
''' MAIN FUNCTION '''
def main() -> None:
    """main function, parses params and runs command functions
    :return:
    :rtype:
    """
    api_key = demisto.params().get('apikey')
    # get the service API url
    base_url = demisto.params()['url']
    # if your Client class inherits from BaseClient, SSL verification is
    # handled out of the box by it, just pass ``verify_certificate`` to
    # the Client constructor
    verify_certificate = not demisto.params().get('insecure', False)
    # if your Client class inherits from BaseClient, system proxy is handled
    # out of the box by it, just pass ``proxy`` to the Client constructor
    proxy = demisto.params().get('proxy', False)
    # INTEGRATION DEVELOPER TIP
    # You can use functions such as ``demisto.debug()``, ``demisto.info()``,
    # etc. to print information in the XSOAR server log. You can set the log
    # level on the server configuration
    # See: https://xsoar.pan.dev/docs/integrations/code-conventions#logging
    demisto.debug(f'Command being called is {demisto.command()}')
    try:
        # The API key is sent on every request via this custom header.
        headers = {
            'feye-auth-key': f'{api_key}'
        }
        client = Client(
            base_url=base_url,
            verify=verify_certificate,
            headers=headers,
            proxy=proxy)
        # Dispatch to the handler matching the invoked integration command.
        if demisto.command() == 'test-module':
            # This is the call made when pressing the integration Test button.
            result = test_module(client)
            demisto.results(result)
        elif demisto.command() == 'fireeye-dod-get-hashes':
            return_outputs(*get_hashes_command(client, demisto.args()))
        elif demisto.command() == 'fireeye-dod-get-reports':
            return_outputs(*get_reports_command(client, demisto.args()))
        elif demisto.command() == 'fireeye-dod-submit-file':
            return_outputs(*submit_file_command(client, demisto.args()))
        elif demisto.command() == 'fireeye-dod-submit-urls':
            return_outputs(*submit_urls_command(client, demisto.args()))
        elif demisto.command() == 'fireeye-dod-get-report-url':
            return_outputs(*generate_report_url(client, demisto.args()))
    # Log exceptions and return errors
    # NOTE(review): re-raising here surfaces a raw traceback instead of a
    # friendly return_error message; presumably the commented-out lines
    # below were the intended behavior — confirm.
    except Exception as e:
        raise e
        # demisto.error(traceback.format_exc())  # print the traceback
        # return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
''' ENTRY POINT '''
# The XSOAR script runner may execute integrations under the '__builtin__'
# (Py2) or 'builtins' (Py3) module names, hence the three-way check.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| 33.684444
| 120
| 0.609975
|
794c96459df2f5ab5ab9c92458e818aeba34e390
| 7,072
|
py
|
Python
|
flit/init.py
|
Zaab1t/flit
|
58e8eaf224c11c5ad4bb7ff06350f95989bbb867
|
[
"BSD-3-Clause"
] | 1
|
2020-07-22T19:50:08.000Z
|
2020-07-22T19:50:08.000Z
|
flit/init.py
|
Mariatta/flit
|
0a44f806f1689b6ae5d9e4c0b82e3b5094ab32a2
|
[
"BSD-3-Clause"
] | null | null | null |
flit/init.py
|
Mariatta/flit
|
0a44f806f1689b6ae5d9e4c0b82e3b5094ab32a2
|
[
"BSD-3-Clause"
] | null | null | null |
from collections import OrderedDict
from datetime import date
import json
import os
from pathlib import Path
import re
import sys
import pytoml as toml
def get_data_dir():
    """Get the directory path for flit user data files.

    Resolves a platform-appropriate per-user data directory:
    ~/Library on macOS, %APPDATA% on Windows, and $XDG_DATA_HOME
    (or ~/.local/share) elsewhere.
    """
    home = os.path.realpath(os.path.expanduser('~'))

    if sys.platform == 'darwin':
        base = Path(home, 'Library')
    elif os.name == 'nt':
        appdata = os.environ.get('APPDATA', None)
        base = Path(appdata) if appdata else Path(home, 'AppData', 'Roaming')
    else:
        # Linux, non-OS X Unix, AIX, etc. — follow the XDG base dir convention.
        xdg = os.environ.get("XDG_DATA_HOME", None)
        base = Path(xdg) if xdg else Path(home, '.local/share')

    return base / 'flit'
def get_defaults():
    """Load previously saved init defaults, or {} if none exist yet."""
    defaults_file = get_data_dir() / 'init_defaults.json'
    try:
        with defaults_file.open(encoding='utf-8') as f:
            return json.load(f)
    except FileNotFoundError:
        return {}
def store_defaults(d):
    """Persist init defaults as JSON in the flit user data directory.

    Args:
        d: dict. JSON-serializable mapping of default values to remember.
    """
    data_dir = get_data_dir()
    # exist_ok replaces the old try/except-FileExistsError dance;
    # parents=True still creates intermediate directories as before.
    data_dir.mkdir(parents=True, exist_ok=True)
    with (data_dir / 'init_defaults.json').open('w', encoding='utf-8') as f:
        json.dump(d, f, indent=2)
# (key, description) pairs shown to the user when choosing a license.
license_choices = [
    ('mit', "MIT - simple and permissive"),
    ('apache', "Apache - explicitly grants patent rights"),
    ('gpl3', "GPL - ensures that code based on this is shared with the same terms"),
    ('skip', "Skip - choose a license later"),
]
# Maps a license key to its PyPI trove classifier string.
license_names_to_classifiers = {
    'mit': 'License :: OSI Approved :: MIT License',
    'gpl3': 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
    'apache': 'License :: OSI Approved :: Apache Software License'
}
# Directory bundled alongside this module containing license text templates.
license_templates_dir = Path(__file__).parent / 'license_templates'
class IniterBase:
    """UI-agnostic logic shared by `flit init` front ends."""
    def __init__(self, directory='.'):
        # Project directory to initialise; stored defaults seed the prompts.
        self.directory = Path(directory)
        self.defaults = get_defaults()
    def validate_email(self, s):
        """Return True if *s* loosely looks like an email address."""
        # Properly validating an email address is much more complex
        return bool(re.match(r'.+@.+', s))
    def guess_module_name(self):
        """Guess the project's module name from the directory contents.

        Returns the name only when there is exactly one candidate package
        (a dir with __init__.py, excluding test/tests) or, failing that,
        exactly one candidate .py module (excluding setup.py and test_*);
        otherwise None.
        """
        packages, modules = [], []
        for p in self.directory.iterdir():
            if not p.stem.isidentifier():
                continue
            if p.is_dir() and (p / '__init__.py').is_file():
                if p.name not in {'test', 'tests'}:
                    packages.append(p.name)
            elif p.is_file() and p.suffix == '.py':
                if p.stem not in {'setup'} and not p.name.startswith('test_'):
                    modules.append(p.stem)
        if len(packages) == 1:
            return packages[0]
        elif len(packages) == 0 and len(modules) == 1:
            return modules[0]
        else:
            return None
    def update_defaults(self, author, author_email, module, home_page, license):
        """Remember the answers the user gave, for reuse on the next init.

        If the home page contains the module name, it is stored as a
        template with a {modulename} placeholder so it adapts to the next
        project.
        """
        new_defaults = {'author': author, 'author_email': author_email,
                        'license': license}
        name_chunk_pat = r'\b{}\b'.format(re.escape(module))
        if re.search(name_chunk_pat, home_page):
            new_defaults['home_page_template'] = \
                re.sub(name_chunk_pat, '{modulename}', home_page, flags=re.I)
        # Only rewrite the defaults file when something actually changed.
        if any(new_defaults[k] != self.defaults.get(k) for k in new_defaults):
            self.defaults.update(new_defaults)
            store_defaults(self.defaults)
    def write_license(self, name, author):
        """Write a LICENSE file from the bundled template for *name*.

        Existing LICENSE files are never overwritten.
        """
        if (self.directory / 'LICENSE').exists():
            return
        year = date.today().year
        with (license_templates_dir / name).open(encoding='utf-8') as f:
            license_text = f.read()
        with (self.directory / 'LICENSE').open('w', encoding='utf-8') as f:
            f.write(license_text.format(year=year, author=author))
class TerminalIniter(IniterBase):
    """Interactive terminal front end for `flit init`."""
    def prompt_text(self, prompt, default, validator):
        """Prompt for free text until *validator* accepts the answer.

        An empty response selects *default* when one is given.
        """
        if default is not None:
            p = "{} [{}]: ".format(prompt, default)
        else:
            p = prompt + ': '
        while True:
            response = input(p)
            if response == '' and default is not None:
                response = default
            if validator(response):
                return response
            print("Try again.")
    def prompt_options(self, prompt, options, default=None):
        """Prompt the user to pick one of *options* by number.

        Args:
            prompt: str. Question shown above the numbered list.
            options: list of (key, description) pairs.
            default: optional key pre-selected when the user hits Enter.

        Returns:
            The key of the chosen option.
        """
        default_ix = None
        print(prompt)
        for i, (key, text) in enumerate(options, start=1):
            print("{}. {}".format(i, text))
            if key == default:
                default_ix = i
        while True:
            p = "Enter 1-" + str(len(options))
            if default_ix is not None:
                p += ' [{}]'.format(default_ix)
            response = input(p+': ')
            if (default_ix is not None) and response == '':
                return default
            if response.isnumeric():
                ir = int(response)
                if 1 <= ir <= len(options):
                    return options[ir-1][0]
            print("Try again.")
    def initialise(self):
        """Interactively gather metadata and write pyproject.toml.

        Asks before overwriting an existing pyproject.toml, records the
        answers as future defaults, and writes a LICENSE file when a
        concrete license was chosen.
        """
        if (self.directory / 'pyproject.toml').exists():
            resp = input("pyproject.toml exists - overwrite it? [y/N]: ")
            if (not resp) or resp[0].lower() != 'y':
                return
        module = self.prompt_text('Module name', self.guess_module_name(),
                                  str.isidentifier)
        author = self.prompt_text('Author', self.defaults.get('author'),
                                  lambda s: s != '')
        author_email = self.prompt_text('Author email',
                        self.defaults.get('author_email'), self.validate_email)
        if 'home_page_template' in self.defaults:
            home_page_default = self.defaults['home_page_template'].replace(
                                                        '{modulename}', module)
        else:
            home_page_default = None
        home_page = self.prompt_text('Home page', home_page_default,
                                     lambda s: s != '')
        license = self.prompt_options('Choose a license (see http://choosealicense.com/ for more info)',
                    license_choices, self.defaults.get('license'))
        self.update_defaults(author=author, author_email=author_email,
                             home_page=home_page, module=module, license=license)
        metadata = OrderedDict([
            ('module', module),
            ('author', author),
            ('author-email', author_email),
            ('home-page', home_page),
        ])
        if license != 'skip':
            metadata['classifiers'] = [license_names_to_classifiers[license]]
            self.write_license(license, author)
        with (self.directory / 'pyproject.toml').open('w', encoding='utf-8') as f:
            f.write(TEMPLATE.format(metadata=toml.dumps(metadata)))
        print()
        print("Written pyproject.toml; edit that file to add optional extra info.")
# Skeleton pyproject.toml written by TerminalIniter.initialise();
# {metadata} is filled with the TOML-serialized metadata table.
TEMPLATE = """\
[build-system]
requires = ["flit"]
build-backend = "flit.buildapi"
[tool.flit.metadata]
{metadata}
"""
# Allow running this module directly for a quick interactive init.
if __name__ == '__main__':
    TerminalIniter().initialise()
| 34.837438
| 104
| 0.56349
|
794c96bb486bafbda42421d3069815190e1464f6
| 194
|
py
|
Python
|
filer/server/main_server_urls.py
|
PeterW-LWL/django-filer
|
472a0419bfa185a8b0a861bd0779ac6d817082c7
|
[
"BSD-3-Clause"
] | 134
|
2015-01-01T17:57:03.000Z
|
2021-11-01T15:21:47.000Z
|
filer/server/main_server_urls.py
|
PeterW-LWL/django-filer
|
472a0419bfa185a8b0a861bd0779ac6d817082c7
|
[
"BSD-3-Clause"
] | 143
|
2015-01-05T04:53:01.000Z
|
2015-11-27T14:44:29.000Z
|
filer/server/main_server_urls.py
|
PeterW-LWL/django-filer
|
472a0419bfa185a8b0a861bd0779ac6d817082c7
|
[
"BSD-3-Clause"
] | 86
|
2015-01-05T13:05:25.000Z
|
2021-04-03T01:36:15.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from django.conf.urls import url
from . import views
# Catch-all route: every path under this prefix is routed through the
# permission-checking file-serving view.
urlpatterns = [
    url(r'^(?P<path>.*)$', views.serve_protected_file),
]
| 16.166667
| 55
| 0.680412
|
794c9731cb2ed5a2b32c2594f7b917bd24737cf5
| 1,641
|
py
|
Python
|
app/api/models.py
|
deckTECHeu/profiles-rest-api
|
031a917f2c5dfbccca60ffec4c55addf57433e61
|
[
"MIT"
] | null | null | null |
app/api/models.py
|
deckTECHeu/profiles-rest-api
|
031a917f2c5dfbccca60ffec4c55addf57433e61
|
[
"MIT"
] | 6
|
2021-03-19T11:56:01.000Z
|
2022-02-10T10:35:55.000Z
|
app/api/models.py
|
deckTECHeu/profiles-rest-api
|
031a917f2c5dfbccca60ffec4c55addf57433e61
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.models import BaseUserManager
class UserProfileManager(BaseUserManager):
    """Manager that creates regular users and superusers for UserProfile."""

    def create_user(self, email, name, password=None):
        """Create, save and return a new user profile.

        Raises:
            ValueError: If no email address is supplied.
        """
        if not email:
            raise ValueError('User must have an email address')

        normalized_email = self.normalize_email(email)
        new_user = self.model(email=normalized_email, name=name)
        # Hash the password rather than storing it in plain text.
        new_user.set_password(password)
        new_user.save(using=self._db)

        return new_user

    def create_superuser(self, email, name, password):
        """Create and save a new superuser with given details."""
        superuser = self.create_user(email, name, password)

        superuser.is_superuser = True
        superuser.is_staff = True
        superuser.save(using=self._db)

        return superuser
class UserProfile(AbstractBaseUser, PermissionsMixin):
    """Database model for users in the system."""
    # Email doubles as the login identifier (see USERNAME_FIELD below).
    email = models.EmailField(max_length=255, unique=True)
    name = models.CharField(max_length=255)
    # Soft-disable flag; inactive users cannot authenticate.
    is_active = models.BooleanField(default=True)
    # Grants access to the Django admin site.
    is_staff = models.BooleanField(default=False)
    objects = UserProfileManager()
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['name']
    def get_full_name(self):
        """Retrieve full name of user"""
        return self.name
    def get_short_name(self):
        """Retrieve short name of user"""
        return self.name
    def __str__(self):
        """Return string representation of user"""
        return self.email
| 28.789474
| 64
| 0.675807
|
794c9732fd94b9e9e48dbc4d2aa03f9c131e2674
| 715
|
py
|
Python
|
odoo-13.0/addons/website_membership/models/membership.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
|
b5190a037fb6615386f7cbad024d51b0abd4ba03
|
[
"MIT"
] | null | null | null |
odoo-13.0/addons/website_membership/models/membership.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
|
b5190a037fb6615386f7cbad024d51b0abd4ba03
|
[
"MIT"
] | null | null | null |
odoo-13.0/addons/website_membership/models/membership.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
|
b5190a037fb6615386f7cbad024d51b0abd4ba03
|
[
"MIT"
] | 1
|
2021-05-05T07:59:08.000Z
|
2021-05-05T07:59:08.000Z
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models
class MembershipLine(models.Model):
    _inherit = 'membership.membership_line'
    def get_published_companies(self, limit=None):
        """Return ids of published partner companies holding a membership
        line in this recordset.

        Args:
            limit: int or None. Optional cap on the number of ids returned.

        Returns:
            list of int. Distinct res_partner ids (empty if the recordset
            is empty).
        """
        if not self.ids:
            return []
        # `limit` is interpolated with %d, which only accepts an integer, so
        # this cannot inject SQL; the ids themselves are passed as a bound
        # query parameter below.
        limit_clause = '' if limit is None else ' LIMIT %d' % limit
        self.env.cr.execute("""
            SELECT DISTINCT p.id
            FROM res_partner p INNER JOIN membership_membership_line m
            ON p.id = m.partner
            WHERE is_published AND is_company AND m.id IN %s """ + limit_clause, (tuple(self.ids),))
        return [partner_id[0] for partner_id in self.env.cr.fetchall()]
| 35.75
| 100
| 0.641958
|
794c97b618476d2dc555fbd17578ab10a79ece13
| 57,041
|
py
|
Python
|
openmc/settings.py
|
SamPUG/openmc
|
70c19dfef4991c5f89161a87868f109c47efc76d
|
[
"MIT"
] | 1
|
2020-11-19T14:46:10.000Z
|
2020-11-19T14:46:10.000Z
|
openmc/settings.py
|
SamPUG/openmc
|
70c19dfef4991c5f89161a87868f109c47efc76d
|
[
"MIT"
] | 2
|
2020-02-24T15:13:15.000Z
|
2020-03-17T18:59:22.000Z
|
openmc/settings.py
|
SamPUG/openmc
|
70c19dfef4991c5f89161a87868f109c47efc76d
|
[
"MIT"
] | null | null | null |
from collections.abc import Iterable, MutableSequence, Mapping
from pathlib import Path
from numbers import Real, Integral
import warnings
from xml.etree import ElementTree as ET
import sys
import numpy as np
from openmc._xml import clean_indentation, get_text
import openmc.checkvalue as cv
from openmc import VolumeCalculation, Source, RegularMesh
# Valid values accepted by Settings.run_mode.
_RUN_MODES = ['eigenvalue', 'fixed source', 'plot', 'volume', 'particle restart']
# Resonance elastic scattering methods: Doppler broadening rejection
# correction ('dbrc') and relative velocity sampling ('rvs').
_RES_SCAT_METHODS = ['dbrc', 'rvs']
class Settings(object):
"""Settings used for an OpenMC simulation.
Attributes
----------
batches : int
Number of batches to simulate
confidence_intervals : bool
If True, uncertainties on tally results will be reported as the
half-width of the 95% two-sided confidence interval. If False,
uncertainties on tally results will be reported as the sample standard
deviation.
create_fission_neutrons : bool
Indicate whether fission neutrons should be created or not.
cutoff : dict
Dictionary defining weight cutoff and energy cutoff. The dictionary may
have six keys, 'weight', 'weight_avg', 'energy_neutron', 'energy_photon',
'energy_electron', and 'energy_positron'. Value for 'weight'
should be a float indicating weight cutoff below which particle undergo
Russian roulette. Value for 'weight_avg' should be a float indicating
weight assigned to particles that are not killed after Russian
roulette. Value of energy should be a float indicating energy in eV
below which particle type will be killed.
dagmc : bool
Indicate that a CAD-based DAGMC geometry will be used.
delayed_photon_scaling : bool
Indicate whether to scale the fission photon yield by (EGP + EGD)/EGP
where EGP is the energy release of prompt photons and EGD is the energy
release of delayed photons.
electron_treatment : {'led', 'ttb'}
Whether to deposit all energy from electrons locally ('led') or create
secondary bremsstrahlung photons ('ttb').
energy_mode : {'continuous-energy', 'multi-group'}
Set whether the calculation should be continuous-energy or multi-group.
entropy_mesh : openmc.RegularMesh
Mesh to be used to calculate Shannon entropy. If the mesh dimensions are
not specified. OpenMC assigns a mesh such that 20 source sites per mesh
cell are to be expected on average.
event_based : bool
Indicate whether to use event-based parallelism instead of the default
history-based parallelism.
generations_per_batch : int
Number of generations per batch
max_lost_particles : int
Maximum number of lost particles
rel_max_lost_particles : int
Maximum number of lost particles, relative to the total number of particles
inactive : int
Number of inactive batches
keff_trigger : dict
Dictionary defining a trigger on eigenvalue. The dictionary must have
two keys, 'type' and 'threshold'. Acceptable values corresponding to
type are 'variance', 'std_dev', and 'rel_err'. The threshold value
should be a float indicating the variance, standard deviation, or
relative error used.
log_grid_bins : int
Number of bins for logarithmic energy grid search
material_cell_offsets : bool
Generate an "offset table" for material cells by default. These tables
are necessary when a particular instance of a cell needs to be tallied.
max_particles_in_flight : int
Number of neutrons to run concurrently when using event-based
parallelism.
max_order : None or int
Maximum scattering order to apply globally when in multi-group mode.
no_reduce : bool
Indicate that all user-defined and global tallies should not be reduced
across processes in a parallel calculation.
output : dict
Dictionary indicating what files to output. Acceptable keys are:
:path: String indicating a directory where output files should be
written
:summary: Whether the 'summary.h5' file should be written (bool)
:tallies: Whether the 'tallies.out' file should be written (bool)
particles : int
Number of particles per generation
photon_transport : bool
Whether to use photon transport.
ptables : bool
Determine whether probability tables are used.
resonance_scattering : dict
Settings for resonance elastic scattering. Accepted keys are 'enable'
(bool), 'method' (str), 'energy_min' (float), 'energy_max' (float), and
'nuclides' (list). The 'method' can be set to 'dbrc' (Doppler broadening
rejection correction) or 'rvs' (relative velocity sampling). If not
specified, 'rvs' is the default method. The 'energy_min' and
'energy_max' values indicate the minimum and maximum energies above and
below which the resonance elastic scattering method is to be
applied. The 'nuclides' list indicates what nuclides the method should
be applied to. In its absence, the method will be applied to all
nuclides with 0 K elastic scattering data present.
run_mode : {'eigenvalue', 'fixed source', 'plot', 'volume', 'particle restart'}
The type of calculation to perform (default is 'eigenvalue')
seed : int
Seed for the linear congruential pseudorandom number generator
source : Iterable of openmc.Source
Distribution of source sites in space, angle, and energy
sourcepoint : dict
Options for writing source points. Acceptable keys are:
:batches: list of batches at which to write source
:overwrite: bool indicating whether to overwrite
:separate: bool indicating whether the source should be written as a
separate file
:write: bool indicating whether or not to write the source
statepoint : dict
Options for writing state points. Acceptable keys are:
:batches: list of batches at which to write source
survival_biasing : bool
Indicate whether survival biasing is to be used
tabular_legendre : dict
Determines if a multi-group scattering moment kernel expanded via
Legendre polynomials is to be converted to a tabular distribution or
not. Accepted keys are 'enable' and 'num_points'. The value for
'enable' is a bool stating whether the conversion to tabular is
performed; the value for 'num_points' sets the number of points to use
in the tabular distribution, should 'enable' be True.
temperature : dict
Defines a default temperature and method for treating intermediate
temperatures at which nuclear data doesn't exist. Accepted keys are
'default', 'method', 'range', 'tolerance', and 'multipole'. The value
for 'default' should be a float representing the default temperature in
Kelvin. The value for 'method' should be 'nearest' or 'interpolation'.
If the method is 'nearest', 'tolerance' indicates a range of temperature
within which cross sections may be used. The value for 'range' should be
        a pair of minimum and maximum temperatures which are used to indicate
that cross sections be loaded at all temperatures within the
range. 'multipole' is a boolean indicating whether or not the windowed
multipole method should be used to evaluate resolved resonance cross
sections.
trace : tuple or list
Show detailed information about a single particle, indicated by three
integers: the batch number, generation number, and particle number
track : tuple or list
Specify particles for which track files should be written. Each particle
is identified by a triplet with the batch number, generation number, and
particle number.
trigger_active : bool
Indicate whether tally triggers are used
trigger_batch_interval : int
Number of batches in between convergence checks
trigger_max_batches : int
Maximum number of batches simulated. If this is set, the number of
batches specified via ``batches`` is interpreted as the minimum number
of batches
ufs_mesh : openmc.RegularMesh
Mesh to be used for redistributing source sites via the uniform fision
site (UFS) method.
verbosity : int
Verbosity during simulation between 1 and 10. Verbosity levels are
described in :ref:`verbosity`.
volume_calculations : VolumeCalculation or iterable of VolumeCalculation
Stochastic volume calculation specifications
"""
def __init__(self):
# Run mode subelement (default is 'eigenvalue')
self._run_mode = 'eigenvalue'
self._batches = None
self._generations_per_batch = None
self._inactive = None
self._max_lost_particles = None
self._rel_max_lost_particles = None
self._particles = None
self._keff_trigger = None
# Energy mode subelement
self._energy_mode = None
self._max_order = None
# Source subelement
self._source = cv.CheckedList(Source, 'source distributions')
self._confidence_intervals = None
self._electron_treatment = None
self._photon_transport = None
self._ptables = None
self._seed = None
self._survival_biasing = None
# Shannon entropy mesh
self._entropy_mesh = None
# Trigger subelement
self._trigger_active = None
self._trigger_max_batches = None
self._trigger_batch_interval = None
self._output = None
# Output options
self._statepoint = {}
self._sourcepoint = {}
self._no_reduce = None
self._verbosity = None
self._trace = None
self._track = None
self._tabular_legendre = {}
self._temperature = {}
# Cutoff subelement
self._cutoff = None
# Uniform fission source subelement
self._ufs_mesh = None
self._resonance_scattering = {}
self._volume_calculations = cv.CheckedList(
VolumeCalculation, 'volume calculations')
self._create_fission_neutrons = None
self._delayed_photon_scaling = None
self._material_cell_offsets = None
self._log_grid_bins = None
self._dagmc = False
self._event_based = None
self._max_particles_in_flight = None
@property
def run_mode(self):
return self._run_mode
@property
def batches(self):
return self._batches
@property
def generations_per_batch(self):
return self._generations_per_batch
@property
def inactive(self):
return self._inactive
@property
def max_lost_particles(self):
return self._max_lost_particles
@property
def rel_max_lost_particles(self):
return self._rel_max_lost_particles
@property
def particles(self):
return self._particles
@property
def keff_trigger(self):
return self._keff_trigger
@property
def energy_mode(self):
return self._energy_mode
@property
def max_order(self):
return self._max_order
@property
def source(self):
return self._source
@property
def confidence_intervals(self):
return self._confidence_intervals
@property
def electron_treatment(self):
return self._electron_treatment
@property
def ptables(self):
return self._ptables
@property
def photon_transport(self):
return self._photon_transport
@property
def seed(self):
return self._seed
@property
def survival_biasing(self):
return self._survival_biasing
@property
def entropy_mesh(self):
return self._entropy_mesh
@property
def trigger_active(self):
return self._trigger_active
@property
def trigger_max_batches(self):
return self._trigger_max_batches
@property
def trigger_batch_interval(self):
return self._trigger_batch_interval
@property
def output(self):
return self._output
@property
def sourcepoint(self):
return self._sourcepoint
@property
def statepoint(self):
return self._statepoint
@property
def no_reduce(self):
return self._no_reduce
@property
def verbosity(self):
return self._verbosity
@property
def tabular_legendre(self):
return self._tabular_legendre
@property
def temperature(self):
return self._temperature
@property
def trace(self):
return self._trace
@property
def track(self):
return self._track
@property
def cutoff(self):
return self._cutoff
@property
def ufs_mesh(self):
return self._ufs_mesh
@property
def resonance_scattering(self):
return self._resonance_scattering
@property
def volume_calculations(self):
return self._volume_calculations
@property
def create_fission_neutrons(self):
return self._create_fission_neutrons
@property
def delayed_photon_scaling(self):
return self._delayed_photon_scaling
@property
def material_cell_offsets(self):
return self._material_cell_offsets
@property
def log_grid_bins(self):
return self._log_grid_bins
@property
def dagmc(self):
return self._dagmc
@property
def event_based(self):
return self._event_based
@property
def max_particles_in_flight(self):
return self._max_particles_in_flight
@run_mode.setter
def run_mode(self, run_mode):
cv.check_value('run mode', run_mode, _RUN_MODES)
self._run_mode = run_mode
@batches.setter
def batches(self, batches):
cv.check_type('batches', batches, Integral)
cv.check_greater_than('batches', batches, 0)
self._batches = batches
@generations_per_batch.setter
def generations_per_batch(self, generations_per_batch):
cv.check_type('generations per patch', generations_per_batch, Integral)
cv.check_greater_than('generations per batch', generations_per_batch, 0)
self._generations_per_batch = generations_per_batch
@inactive.setter
def inactive(self, inactive):
cv.check_type('inactive batches', inactive, Integral)
cv.check_greater_than('inactive batches', inactive, 0, True)
self._inactive = inactive
@max_lost_particles.setter
def max_lost_particles(self, max_lost_particles):
cv.check_type('max_lost_particles', max_lost_particles, Integral)
cv.check_greater_than('max_lost_particles', max_lost_particles, 0)
self._max_lost_particles = max_lost_particles
@rel_max_lost_particles.setter
def rel_max_lost_particles(self, rel_max_lost_particles):
cv.check_type('rel_max_lost_particles', rel_max_lost_particles, Real)
cv.check_greater_than('rel_max_lost_particles', rel_max_lost_particles, 0)
cv.check_less_than('rel_max_lost_particles', rel_max_lost_particles, 1)
self._rel_max_lost_particles = rel_max_lost_particles
@particles.setter
def particles(self, particles):
cv.check_type('particles', particles, Integral)
cv.check_greater_than('particles', particles, 0)
self._particles = particles
@keff_trigger.setter
def keff_trigger(self, keff_trigger):
if not isinstance(keff_trigger, dict):
msg = 'Unable to set a trigger on keff from "{0}" which ' \
'is not a Python dictionary'.format(keff_trigger)
raise ValueError(msg)
elif 'type' not in keff_trigger:
msg = 'Unable to set a trigger on keff from "{0}" which ' \
'does not have a "type" key'.format(keff_trigger)
raise ValueError(msg)
elif keff_trigger['type'] not in ['variance', 'std_dev', 'rel_err']:
msg = 'Unable to set a trigger on keff with ' \
'type "{0}"'.format(keff_trigger['type'])
raise ValueError(msg)
elif 'threshold' not in keff_trigger:
msg = 'Unable to set a trigger on keff from "{0}" which ' \
'does not have a "threshold" key'.format(keff_trigger)
raise ValueError(msg)
elif not isinstance(keff_trigger['threshold'], Real):
msg = 'Unable to set a trigger on keff with ' \
'threshold "{0}"'.format(keff_trigger['threshold'])
raise ValueError(msg)
self._keff_trigger = keff_trigger
@energy_mode.setter
def energy_mode(self, energy_mode):
cv.check_value('energy mode', energy_mode,
['continuous-energy', 'multi-group'])
self._energy_mode = energy_mode
@max_order.setter
def max_order(self, max_order):
if max_order is not None:
cv.check_type('maximum scattering order', max_order, Integral)
cv.check_greater_than('maximum scattering order', max_order, 0,
True)
self._max_order = max_order
@source.setter
def source(self, source):
if not isinstance(source, MutableSequence):
source = [source]
self._source = cv.CheckedList(Source, 'source distributions', source)
@output.setter
def output(self, output):
cv.check_type('output', output, Mapping)
for key, value in output.items():
cv.check_value('output key', key, ('summary', 'tallies', 'path'))
if key in ('summary', 'tallies'):
cv.check_type("output['{}']".format(key), value, bool)
else:
cv.check_type("output['path']", value, str)
self._output = output
@verbosity.setter
def verbosity(self, verbosity):
cv.check_type('verbosity', verbosity, Integral)
cv.check_greater_than('verbosity', verbosity, 1, True)
cv.check_less_than('verbosity', verbosity, 10, True)
self._verbosity = verbosity
@sourcepoint.setter
def sourcepoint(self, sourcepoint):
cv.check_type('sourcepoint options', sourcepoint, Mapping)
for key, value in sourcepoint.items():
if key == 'batches':
cv.check_type('sourcepoint batches', value, Iterable, Integral)
for batch in value:
cv.check_greater_than('sourcepoint batch', batch, 0)
elif key == 'separate':
cv.check_type('sourcepoint separate', value, bool)
elif key == 'write':
cv.check_type('sourcepoint write', value, bool)
elif key == 'overwrite':
cv.check_type('sourcepoint overwrite', value, bool)
else:
raise ValueError("Unknown key '{}' encountered when setting "
"sourcepoint options.".format(key))
self._sourcepoint = sourcepoint
@statepoint.setter
def statepoint(self, statepoint):
cv.check_type('statepoint options', statepoint, Mapping)
for key, value in statepoint.items():
if key == 'batches':
cv.check_type('statepoint batches', value, Iterable, Integral)
for batch in value:
cv.check_greater_than('statepoint batch', batch, 0)
else:
raise ValueError("Unknown key '{}' encountered when setting "
"statepoint options.".format(key))
self._statepoint = statepoint
@confidence_intervals.setter
def confidence_intervals(self, confidence_intervals):
cv.check_type('confidence interval', confidence_intervals, bool)
self._confidence_intervals = confidence_intervals
@electron_treatment.setter
def electron_treatment(self, electron_treatment):
cv.check_value('electron treatment', electron_treatment, ['led', 'ttb'])
self._electron_treatment = electron_treatment
@photon_transport.setter
def photon_transport(self, photon_transport):
cv.check_type('photon transport', photon_transport, bool)
self._photon_transport = photon_transport
@dagmc.setter
def dagmc(self, dagmc):
cv.check_type('dagmc geometry', dagmc, bool)
self._dagmc = dagmc
@ptables.setter
def ptables(self, ptables):
cv.check_type('probability tables', ptables, bool)
self._ptables = ptables
@seed.setter
def seed(self, seed):
cv.check_type('random number generator seed', seed, Integral)
cv.check_greater_than('random number generator seed', seed, 0)
self._seed = seed
@survival_biasing.setter
def survival_biasing(self, survival_biasing):
cv.check_type('survival biasing', survival_biasing, bool)
self._survival_biasing = survival_biasing
@cutoff.setter
def cutoff(self, cutoff):
if not isinstance(cutoff, Mapping):
msg = 'Unable to set cutoff from "{0}" which is not a '\
' Python dictionary'.format(cutoff)
raise ValueError(msg)
for key in cutoff:
if key == 'weight':
cv.check_type('weight cutoff', cutoff[key], Real)
cv.check_greater_than('weight cutoff', cutoff[key], 0.0)
elif key == 'weight_avg':
cv.check_type('average survival weight', cutoff[key], Real)
cv.check_greater_than('average survival weight',
cutoff[key], 0.0)
elif key in ['energy_neutron', 'energy_photon', 'energy_electron',
'energy_positron']:
cv.check_type('energy cutoff', cutoff[key], Real)
cv.check_greater_than('energy cutoff', cutoff[key], 0.0)
else:
msg = 'Unable to set cutoff to "{0}" which is unsupported by '\
'OpenMC'.format(key)
self._cutoff = cutoff
@entropy_mesh.setter
def entropy_mesh(self, entropy):
cv.check_type('entropy mesh', entropy, RegularMesh)
if entropy.dimension:
cv.check_length('entropy mesh dimension', entropy.dimension, 3)
cv.check_length('entropy mesh lower-left corner', entropy.lower_left, 3)
cv.check_length('entropy mesh upper-right corner', entropy.upper_right, 3)
self._entropy_mesh = entropy
@trigger_active.setter
def trigger_active(self, trigger_active):
cv.check_type('trigger active', trigger_active, bool)
self._trigger_active = trigger_active
@trigger_max_batches.setter
def trigger_max_batches(self, trigger_max_batches):
cv.check_type('trigger maximum batches', trigger_max_batches, Integral)
cv.check_greater_than('trigger maximum batches', trigger_max_batches, 0)
self._trigger_max_batches = trigger_max_batches
@trigger_batch_interval.setter
def trigger_batch_interval(self, trigger_batch_interval):
cv.check_type('trigger batch interval', trigger_batch_interval, Integral)
cv.check_greater_than('trigger batch interval', trigger_batch_interval, 0)
self._trigger_batch_interval = trigger_batch_interval
@no_reduce.setter
def no_reduce(self, no_reduce):
cv.check_type('no reduction option', no_reduce, bool)
self._no_reduce = no_reduce
@tabular_legendre.setter
def tabular_legendre(self, tabular_legendre):
cv.check_type('tabular_legendre settings', tabular_legendre, Mapping)
for key, value in tabular_legendre.items():
cv.check_value('tabular_legendre key', key,
['enable', 'num_points'])
if key == 'enable':
cv.check_type('enable tabular_legendre', value, bool)
elif key == 'num_points':
cv.check_type('num_points tabular_legendre', value, Integral)
cv.check_greater_than('num_points tabular_legendre', value, 0)
self._tabular_legendre = tabular_legendre
@temperature.setter
def temperature(self, temperature):
cv.check_type('temperature settings', temperature, Mapping)
for key, value in temperature.items():
cv.check_value('temperature key', key,
['default', 'method', 'tolerance', 'multipole',
'range'])
if key == 'default':
cv.check_type('default temperature', value, Real)
elif key == 'method':
cv.check_value('temperature method', value,
['nearest', 'interpolation'])
elif key == 'tolerance':
cv.check_type('temperature tolerance', value, Real)
elif key == 'multipole':
cv.check_type('temperature multipole', value, bool)
elif key == 'range':
cv.check_length('temperature range', value, 2)
for T in value:
cv.check_type('temperature', T, Real)
self._temperature = temperature
@trace.setter
def trace(self, trace):
cv.check_type('trace', trace, Iterable, Integral)
cv.check_length('trace', trace, 3)
cv.check_greater_than('trace batch', trace[0], 0)
cv.check_greater_than('trace generation', trace[1], 0)
cv.check_greater_than('trace particle', trace[2], 0)
self._trace = trace
@track.setter
def track(self, track):
cv.check_type('track', track, Iterable, Integral)
if len(track) % 3 != 0:
msg = 'Unable to set the track to "{0}" since its length is ' \
'not a multiple of 3'.format(track)
raise ValueError(msg)
for t in zip(track[::3], track[1::3], track[2::3]):
cv.check_greater_than('track batch', t[0], 0)
cv.check_greater_than('track generation', t[0], 0)
cv.check_greater_than('track particle', t[0], 0)
self._track = track
@ufs_mesh.setter
def ufs_mesh(self, ufs_mesh):
cv.check_type('UFS mesh', ufs_mesh, RegularMesh)
cv.check_length('UFS mesh dimension', ufs_mesh.dimension, 3)
cv.check_length('UFS mesh lower-left corner', ufs_mesh.lower_left, 3)
cv.check_length('UFS mesh upper-right corner', ufs_mesh.upper_right, 3)
self._ufs_mesh = ufs_mesh
@resonance_scattering.setter
def resonance_scattering(self, res):
cv.check_type('resonance scattering settings', res, Mapping)
keys = ('enable', 'method', 'energy_min', 'energy_max', 'nuclides')
for key, value in res.items():
cv.check_value('resonance scattering dictionary key', key, keys)
if key == 'enable':
cv.check_type('resonance scattering enable', value, bool)
elif key == 'method':
cv.check_value('resonance scattering method', value,
_RES_SCAT_METHODS)
elif key == 'energy_min':
name = 'resonance scattering minimum energy'
cv.check_type(name, value, Real)
cv.check_greater_than(name, value, 0)
elif key == 'energy_max':
name = 'resonance scattering minimum energy'
cv.check_type(name, value, Real)
cv.check_greater_than(name, value, 0)
elif key == 'nuclides':
cv.check_type('resonance scattering nuclides', value,
Iterable, str)
self._resonance_scattering = res
@volume_calculations.setter
def volume_calculations(self, vol_calcs):
if not isinstance(vol_calcs, MutableSequence):
vol_calcs = [vol_calcs]
self._volume_calculations = cv.CheckedList(
VolumeCalculation, 'stochastic volume calculations', vol_calcs)
@create_fission_neutrons.setter
def create_fission_neutrons(self, create_fission_neutrons):
cv.check_type('Whether create fission neutrons',
create_fission_neutrons, bool)
self._create_fission_neutrons = create_fission_neutrons
@delayed_photon_scaling.setter
def delayed_photon_scaling(self, value):
cv.check_type('delayed photon scaling', value, bool)
self._delayed_photon_scaling = value
@event_based.setter
def event_based(self, value):
cv.check_type('event based', value, bool)
self._event_based = value
@max_particles_in_flight.setter
def max_particles_in_flight(self, value):
cv.check_type('max particles in flight', value, Integral)
cv.check_greater_than('max particles in flight', value, 0)
self._max_particles_in_flight = value
@material_cell_offsets.setter
def material_cell_offsets(self, value):
cv.check_type('material cell offsets', value, bool)
self._material_cell_offsets = value
@log_grid_bins.setter
def log_grid_bins(self, log_grid_bins):
cv.check_type('log grid bins', log_grid_bins, Real)
cv.check_greater_than('log grid bins', log_grid_bins, 0)
self._log_grid_bins = log_grid_bins
def _create_run_mode_subelement(self, root):
elem = ET.SubElement(root, "run_mode")
elem.text = self._run_mode
def _create_batches_subelement(self, root):
if self._batches is not None:
element = ET.SubElement(root, "batches")
element.text = str(self._batches)
def _create_generations_per_batch_subelement(self, root):
if self._generations_per_batch is not None:
element = ET.SubElement(root, "generations_per_batch")
element.text = str(self._generations_per_batch)
def _create_inactive_subelement(self, root):
if self._inactive is not None:
element = ET.SubElement(root, "inactive")
element.text = str(self._inactive)
def _create_max_lost_particles_subelement(self, root):
if self._max_lost_particles is not None:
element = ET.SubElement(root, "max_lost_particles")
element.text = str(self._max_lost_particles)
def _create_rel_max_lost_particles_subelement(self, root):
if self._rel_max_lost_particles is not None:
element = ET.SubElement(root, "rel_max_lost_particles")
element.text = str(self._rel_max_lost_particles)
def _create_particles_subelement(self, root):
if self._particles is not None:
element = ET.SubElement(root, "particles")
element.text = str(self._particles)
def _create_keff_trigger_subelement(self, root):
if self._keff_trigger is not None:
element = ET.SubElement(root, "keff_trigger")
for key in self._keff_trigger:
subelement = ET.SubElement(element, key)
subelement.text = str(self._keff_trigger[key]).lower()
def _create_energy_mode_subelement(self, root):
if self._energy_mode is not None:
element = ET.SubElement(root, "energy_mode")
element.text = str(self._energy_mode)
def _create_max_order_subelement(self, root):
if self._max_order is not None:
element = ET.SubElement(root, "max_order")
element.text = str(self._max_order)
def _create_source_subelement(self, root):
for source in self.source:
root.append(source.to_xml_element())
def _create_volume_calcs_subelement(self, root):
for calc in self.volume_calculations:
root.append(calc.to_xml_element())
def _create_output_subelement(self, root):
if self._output is not None:
element = ET.SubElement(root, "output")
for key, value in self._output.items():
subelement = ET.SubElement(element, key)
if key in ('summary', 'tallies'):
subelement.text = str(value).lower()
else:
subelement.text = value
def _create_verbosity_subelement(self, root):
if self._verbosity is not None:
element = ET.SubElement(root, "verbosity")
element.text = str(self._verbosity)
def _create_statepoint_subelement(self, root):
if self._statepoint:
element = ET.SubElement(root, "state_point")
if 'batches' in self._statepoint:
subelement = ET.SubElement(element, "batches")
subelement.text = ' '.join(
str(x) for x in self._statepoint['batches'])
def _create_sourcepoint_subelement(self, root):
if self._sourcepoint:
element = ET.SubElement(root, "source_point")
if 'batches' in self._sourcepoint:
subelement = ET.SubElement(element, "batches")
subelement.text = ' '.join(
str(x) for x in self._sourcepoint['batches'])
if 'separate' in self._sourcepoint:
subelement = ET.SubElement(element, "separate")
subelement.text = str(self._sourcepoint['separate']).lower()
if 'write' in self._sourcepoint:
subelement = ET.SubElement(element, "write")
subelement.text = str(self._sourcepoint['write']).lower()
# Overwrite latest subelement
if 'overwrite' in self._sourcepoint:
subelement = ET.SubElement(element, "overwrite_latest")
subelement.text = str(self._sourcepoint['overwrite']).lower()
def _create_confidence_intervals(self, root):
if self._confidence_intervals is not None:
element = ET.SubElement(root, "confidence_intervals")
element.text = str(self._confidence_intervals).lower()
def _create_electron_treatment_subelement(self, root):
if self._electron_treatment is not None:
element = ET.SubElement(root, "electron_treatment")
element.text = str(self._electron_treatment)
def _create_photon_transport_subelement(self, root):
if self._photon_transport is not None:
element = ET.SubElement(root, "photon_transport")
element.text = str(self._photon_transport).lower()
def _create_ptables_subelement(self, root):
if self._ptables is not None:
element = ET.SubElement(root, "ptables")
element.text = str(self._ptables).lower()
def _create_seed_subelement(self, root):
if self._seed is not None:
element = ET.SubElement(root, "seed")
element.text = str(self._seed)
def _create_survival_biasing_subelement(self, root):
if self._survival_biasing is not None:
element = ET.SubElement(root, "survival_biasing")
element.text = str(self._survival_biasing).lower()
def _create_cutoff_subelement(self, root):
if self._cutoff is not None:
element = ET.SubElement(root, "cutoff")
for key, value in self._cutoff.items():
subelement = ET.SubElement(element, key)
subelement.text = str(value)
def _create_entropy_mesh_subelement(self, root):
if self.entropy_mesh is not None:
# See if a <mesh> element already exists -- if not, add it
path = "./mesh[@id='{}']".format(self.entropy_mesh.id)
if root.find(path) is None:
root.append(self.entropy_mesh.to_xml_element())
subelement = ET.SubElement(root, "entropy_mesh")
subelement.text = str(self.entropy_mesh.id)
def _create_trigger_subelement(self, root):
if self._trigger_active is not None:
trigger_element = ET.SubElement(root, "trigger")
element = ET.SubElement(trigger_element, "active")
element.text = str(self._trigger_active).lower()
if self._trigger_max_batches is not None:
element = ET.SubElement(trigger_element, "max_batches")
element.text = str(self._trigger_max_batches)
if self._trigger_batch_interval is not None:
element = ET.SubElement(trigger_element, "batch_interval")
element.text = str(self._trigger_batch_interval)
def _create_no_reduce_subelement(self, root):
if self._no_reduce is not None:
element = ET.SubElement(root, "no_reduce")
element.text = str(self._no_reduce).lower()
def _create_tabular_legendre_subelements(self, root):
if self.tabular_legendre:
element = ET.SubElement(root, "tabular_legendre")
subelement = ET.SubElement(element, "enable")
subelement.text = str(self._tabular_legendre['enable']).lower()
if 'num_points' in self._tabular_legendre:
subelement = ET.SubElement(element, "num_points")
subelement.text = str(self._tabular_legendre['num_points'])
def _create_temperature_subelements(self, root):
if self.temperature:
for key, value in sorted(self.temperature.items()):
element = ET.SubElement(root,
"temperature_{}".format(key))
if isinstance(value, bool):
element.text = str(value).lower()
elif key == 'range':
element.text = ' '.join(str(T) for T in value)
else:
element.text = str(value)
def _create_trace_subelement(self, root):
if self._trace is not None:
element = ET.SubElement(root, "trace")
element.text = ' '.join(map(str, self._trace))
def _create_track_subelement(self, root):
if self._track is not None:
element = ET.SubElement(root, "track")
element.text = ' '.join(map(str, self._track))
def _create_ufs_mesh_subelement(self, root):
if self.ufs_mesh is not None:
# See if a <mesh> element already exists -- if not, add it
path = "./mesh[@id='{}']".format(self.ufs_mesh.id)
if root.find(path) is None:
root.append(self.ufs_mesh.to_xml_element())
subelement = ET.SubElement(root, "ufs_mesh")
subelement.text = str(self.ufs_mesh.id)
def _create_resonance_scattering_subelement(self, root):
res = self.resonance_scattering
if res:
elem = ET.SubElement(root, 'resonance_scattering')
if 'enable' in res:
subelem = ET.SubElement(elem, 'enable')
subelem.text = str(res['enable']).lower()
if 'method' in res:
subelem = ET.SubElement(elem, 'method')
subelem.text = res['method']
if 'energy_min' in res:
subelem = ET.SubElement(elem, 'energy_min')
subelem.text = str(res['energy_min'])
if 'energy_max' in res:
subelem = ET.SubElement(elem, 'energy_max')
subelem.text = str(res['energy_max'])
if 'nuclides' in res:
subelem = ET.SubElement(elem, 'nuclides')
subelem.text = ' '.join(res['nuclides'])
def _create_create_fission_neutrons_subelement(self, root):
if self._create_fission_neutrons is not None:
elem = ET.SubElement(root, "create_fission_neutrons")
elem.text = str(self._create_fission_neutrons).lower()
def _create_delayed_photon_scaling_subelement(self, root):
if self._delayed_photon_scaling is not None:
elem = ET.SubElement(root, "delayed_photon_scaling")
elem.text = str(self._delayed_photon_scaling).lower()
def _create_event_based_subelement(self, root):
if self._event_based is not None:
elem = ET.SubElement(root, "event_based")
elem.text = str(self._event_based).lower()
def _create_max_particles_in_flight_subelement(self, root):
if self._max_particles_in_flight is not None:
elem = ET.SubElement(root, "max_particles_in_flight")
elem.text = str(self._max_particles_in_flight).lower()
def _create_material_cell_offsets_subelement(self, root):
if self._material_cell_offsets is not None:
elem = ET.SubElement(root, "material_cell_offsets")
elem.text = str(self._material_cell_offsets).lower()
def _create_log_grid_bins_subelement(self, root):
if self._log_grid_bins is not None:
elem = ET.SubElement(root, "log_grid_bins")
elem.text = str(self._log_grid_bins)
def _create_dagmc_subelement(self, root):
if self._dagmc:
elem = ET.SubElement(root, "dagmc")
elem.text = str(self._dagmc).lower()
def _eigenvalue_from_xml_element(self, root):
elem = root.find('eigenvalue')
if elem is not None:
self._run_mode_from_xml_element(elem)
self._particles_from_xml_element(elem)
self._batches_from_xml_element(elem)
self._inactive_from_xml_element(elem)
self._max_lost_particles_from_xml_element(elem)
self._rel_max_lost_particles_from_xml_element(elem)
self._generations_per_batch_from_xml_element(elem)
def _run_mode_from_xml_element(self, root):
text = get_text(root, 'run_mode')
if text is not None:
self.run_mode = text
def _particles_from_xml_element(self, root):
text = get_text(root, 'particles')
if text is not None:
self.particles = int(text)
def _batches_from_xml_element(self, root):
text = get_text(root, 'batches')
if text is not None:
self.batches = int(text)
def _inactive_from_xml_element(self, root):
text = get_text(root, 'inactive')
if text is not None:
self.inactive = int(text)
def _max_lost_particles_from_xml_element(self, root):
text = get_text(root, 'max_lost_particles')
if text is not None:
self.max_lost_particles = int(text)
def _rel_max_lost_particles_from_xml_element(self, root):
text = get_text(root, 'rel_max_lost_particles')
if text is not None:
self.rel_max_lost_particles = float(text)
def _generations_per_batch_from_xml_element(self, root):
text = get_text(root, 'generations_per_batch')
if text is not None:
self.generations_per_batch = int(text)
def _keff_trigger_from_xml_element(self, root):
elem = root.find('keff_trigger')
if elem is not None:
trigger = get_text(elem, 'type')
threshold = float(get_text(elem, 'threshold'))
self.keff_trigger = {'type': trigger, 'threshold': threshold}
def _source_from_xml_element(self, root):
    """Append one Source object for every <source> element under *root*."""
    for src_elem in root.findall('source'):
        self.source.append(Source.from_xml_element(src_elem))
def _output_from_xml_element(self, root):
    """Rebuild the ``self.output`` dict from the <output> element, if present."""
    out_elem = root.find('output')
    if out_elem is None:
        return
    self.output = {}
    for key in ('summary', 'tallies', 'path'):
        value = get_text(out_elem, key)
        if value is None:
            continue
        # 'summary' and 'tallies' are booleans; 'path' stays a string.
        if key in ('summary', 'tallies'):
            value = value in ('true', '1')
        self.output[key] = value
def _statepoint_from_xml_element(self, root):
    """Read <state_point>/<batches> into ``self.statepoint['batches']``."""
    sp_elem = root.find('state_point')
    if sp_elem is None:
        return
    batches = get_text(sp_elem, 'batches')
    if batches is not None:
        self.statepoint['batches'] = [int(x) for x in batches.split()]
def _sourcepoint_from_xml_element(self, root):
    """Fill ``self.sourcepoint`` options from <source_point>, if present."""
    sp_elem = root.find('source_point')
    if sp_elem is None:
        return
    for key in ('separate', 'write', 'overwrite_latest', 'batches'):
        value = get_text(sp_elem, key)
        if value is None:
            continue
        if key in ('separate', 'write'):
            value = value in ('true', '1')
        elif key == 'overwrite_latest':
            # Stored under the shorter 'overwrite' key internally.
            value = value in ('true', '1')
            key = 'overwrite'
        else:
            value = [int(x) for x in value.split()]
        self.sourcepoint[key] = value
def _confidence_intervals_from_xml_element(self, root):
    """Set ``self.confidence_intervals`` from <confidence_intervals>."""
    value = get_text(root, 'confidence_intervals')
    if value is None:
        return
    self.confidence_intervals = value in ('true', '1')
def _electron_treatment_from_xml_element(self, root):
    """Set ``self.electron_treatment`` from <electron_treatment>, if present."""
    value = get_text(root, 'electron_treatment')
    if value is None:
        return
    self.electron_treatment = value
def _energy_mode_from_xml_element(self, root):
    """Set ``self.energy_mode`` from the <energy_mode> element, if present."""
    value = get_text(root, 'energy_mode')
    if value is None:
        return
    self.energy_mode = value
def _max_order_from_xml_element(self, root):
    """Set ``self.max_order`` from the <max_order> element, if present."""
    value = get_text(root, 'max_order')
    if value is None:
        return
    self.max_order = int(value)
def _photon_transport_from_xml_element(self, root):
    """Set ``self.photon_transport`` from <photon_transport>, if present."""
    value = get_text(root, 'photon_transport')
    if value is None:
        return
    self.photon_transport = value in ('true', '1')
def _ptables_from_xml_element(self, root):
    """Set ``self.ptables`` from the <ptables> element, if present."""
    value = get_text(root, 'ptables')
    if value is None:
        return
    self.ptables = value in ('true', '1')
def _seed_from_xml_element(self, root):
    """Set ``self.seed`` from the <seed> element, if present."""
    value = get_text(root, 'seed')
    if value is None:
        return
    self.seed = int(value)
def _survival_biasing_from_xml_element(self, root):
    """Set ``self.survival_biasing`` from <survival_biasing>, if present."""
    value = get_text(root, 'survival_biasing')
    if value is None:
        return
    self.survival_biasing = value in ('true', '1')
def _cutoff_from_xml_element(self, root):
    """Rebuild the ``self.cutoff`` dict from the <cutoff> element, if present."""
    elem = root.find('cutoff')
    if elem is None:
        return
    self.cutoff = {}
    for key in ('energy_neutron', 'energy_photon', 'energy_electron',
                'energy_positron', 'weight', 'weight_avg'):
        value = get_text(elem, key)
        if value is not None:
            self.cutoff[key] = float(value)
def _entropy_mesh_from_xml_element(self, root):
    """Load the mesh referenced by <entropy_mesh> into ``self.entropy_mesh``."""
    mesh_id = get_text(root, 'entropy_mesh')
    if mesh_id is None:
        return
    mesh_elem = root.find("./mesh[@id='{}']".format(int(mesh_id)))
    if mesh_elem is not None:
        self.entropy_mesh = RegularMesh.from_xml_element(mesh_elem)
def _trigger_from_xml_element(self, root):
    """Read trigger activation, max batches and interval from <trigger>."""
    elem = root.find('trigger')
    if elem is None:
        return
    self.trigger_active = get_text(elem, 'active') in ('true', '1')
    max_batches = get_text(elem, 'max_batches')
    if max_batches is not None:
        self.trigger_max_batches = int(max_batches)
    interval = get_text(elem, 'batch_interval')
    if interval is not None:
        self.trigger_batch_interval = int(interval)
def _no_reduce_from_xml_element(self, root):
    """Set ``self.no_reduce`` from the <no_reduce> element, if present."""
    value = get_text(root, 'no_reduce')
    if value is None:
        return
    self.no_reduce = value in ('true', '1')
def _verbosity_from_xml_element(self, root):
    """Set ``self.verbosity`` from the <verbosity> element, if present."""
    value = get_text(root, 'verbosity')
    if value is None:
        return
    self.verbosity = int(value)
def _tabular_legendre_from_xml_element(self, root):
    """Fill ``self.tabular_legendre`` options from <tabular_legendre>."""
    elem = root.find('tabular_legendre')
    if elem is None:
        return
    self.tabular_legendre['enable'] = get_text(elem, 'enable') in ('true', '1')
    num_points = get_text(elem, 'num_points')
    if num_points is not None:
        self.tabular_legendre['num_points'] = int(num_points)
def _temperature_from_xml_element(self, root):
    """Fill ``self.temperature`` from the temperature_* elements, if present.

    Each option is parsed from an element named ``temperature_<key>`` and
    stored under ``<key>`` with an option-specific conversion.
    """
    converters = (
        ('default', float),
        ('tolerance', float),
        ('method', lambda text: text),
        ('range', lambda text: [float(x) for x in text.split()]),
        ('multipole', lambda text: text in ('true', '1')),
    )
    for key, convert in converters:
        text = get_text(root, 'temperature_' + key)
        if text is not None:
            self.temperature[key] = convert(text)
def _trace_from_xml_element(self, root):
    """Set ``self.trace`` (list of ints) from the <trace> element, if present."""
    value = get_text(root, 'trace')
    if value is None:
        return
    self.trace = [int(x) for x in value.split()]
def _track_from_xml_element(self, root):
    """Set ``self.track`` (list of ints) from the <track> element, if present."""
    value = get_text(root, 'track')
    if value is None:
        return
    self.track = [int(x) for x in value.split()]
def _ufs_mesh_from_xml_element(self, root):
    """Load the mesh referenced by <ufs_mesh> into ``self.ufs_mesh``."""
    mesh_id = get_text(root, 'ufs_mesh')
    if mesh_id is None:
        return
    mesh_elem = root.find("./mesh[@id='{}']".format(int(mesh_id)))
    if mesh_elem is not None:
        self.ufs_mesh = RegularMesh.from_xml_element(mesh_elem)
def _resonance_scattering_from_xml_element(self, root):
    """Fill ``self.resonance_scattering`` options from <resonance_scattering>."""
    elem = root.find('resonance_scattering')
    if elem is None:
        return
    for key in ('enable', 'method', 'energy_min', 'energy_max', 'nuclides'):
        value = get_text(elem, key)
        if value is None:
            continue
        if key == 'enable':
            value = value in ('true', '1')
        elif key in ('energy_min', 'energy_max'):
            value = float(value)
        elif key == 'nuclides':
            value = value.split()
        # 'method' is stored as the raw string.
        self.resonance_scattering[key] = value
def _create_fission_neutrons_from_xml_element(self, root):
    """Set ``self.create_fission_neutrons`` from <create_fission_neutrons>."""
    value = get_text(root, 'create_fission_neutrons')
    if value is None:
        return
    self.create_fission_neutrons = value in ('true', '1')
def _delayed_photon_scaling_from_xml_element(self, root):
    """Set ``self.delayed_photon_scaling`` from <delayed_photon_scaling>."""
    value = get_text(root, 'delayed_photon_scaling')
    if value is None:
        return
    self.delayed_photon_scaling = value in ('true', '1')
def _event_based_from_xml_element(self, root):
    """Set ``self.event_based`` from the <event_based> element, if present."""
    value = get_text(root, 'event_based')
    if value is None:
        return
    self.event_based = value in ('true', '1')
def _max_particles_in_flight_from_xml_element(self, root):
    """Set ``self.max_particles_in_flight`` from <max_particles_in_flight>."""
    value = get_text(root, 'max_particles_in_flight')
    if value is None:
        return
    self.max_particles_in_flight = int(value)
def _material_cell_offsets_from_xml_element(self, root):
    """Set ``self.material_cell_offsets`` from <material_cell_offsets>."""
    value = get_text(root, 'material_cell_offsets')
    if value is None:
        return
    self.material_cell_offsets = value in ('true', '1')
def _log_grid_bins_from_xml_element(self, root):
    """Set ``self.log_grid_bins`` from the <log_grid_bins> element, if present."""
    value = get_text(root, 'log_grid_bins')
    if value is None:
        return
    self.log_grid_bins = int(value)
def _dagmc_from_xml_element(self, root):
    """Set ``self.dagmc`` from the <dagmc> element, if present."""
    value = get_text(root, 'dagmc')
    if value is None:
        return
    self.dagmc = value in ('true', '1')
def export_to_xml(self, path='settings.xml'):
    """Export simulation settings to an XML file.

    Parameters
    ----------
    path : str
        Path to file to write. Defaults to 'settings.xml'. If the path is
        an existing directory, the file is written inside it as
        'settings.xml'.
    """
    # Reset xml element tree. Each _create_*_subelement call below is a
    # no-op when the corresponding setting has not been assigned.
    root_element = ET.Element("settings")
    self._create_run_mode_subelement(root_element)
    self._create_particles_subelement(root_element)
    self._create_batches_subelement(root_element)
    self._create_inactive_subelement(root_element)
    self._create_max_lost_particles_subelement(root_element)
    self._create_rel_max_lost_particles_subelement(root_element)
    self._create_generations_per_batch_subelement(root_element)
    self._create_keff_trigger_subelement(root_element)
    self._create_source_subelement(root_element)
    self._create_output_subelement(root_element)
    self._create_statepoint_subelement(root_element)
    self._create_sourcepoint_subelement(root_element)
    self._create_confidence_intervals(root_element)
    self._create_electron_treatment_subelement(root_element)
    self._create_energy_mode_subelement(root_element)
    self._create_max_order_subelement(root_element)
    self._create_photon_transport_subelement(root_element)
    self._create_ptables_subelement(root_element)
    self._create_seed_subelement(root_element)
    self._create_survival_biasing_subelement(root_element)
    self._create_cutoff_subelement(root_element)
    self._create_entropy_mesh_subelement(root_element)
    self._create_trigger_subelement(root_element)
    self._create_no_reduce_subelement(root_element)
    self._create_verbosity_subelement(root_element)
    self._create_tabular_legendre_subelements(root_element)
    self._create_temperature_subelements(root_element)
    self._create_trace_subelement(root_element)
    self._create_track_subelement(root_element)
    self._create_ufs_mesh_subelement(root_element)
    self._create_resonance_scattering_subelement(root_element)
    self._create_volume_calcs_subelement(root_element)
    self._create_create_fission_neutrons_subelement(root_element)
    self._create_delayed_photon_scaling_subelement(root_element)
    self._create_event_based_subelement(root_element)
    self._create_max_particles_in_flight_subelement(root_element)
    self._create_material_cell_offsets_subelement(root_element)
    self._create_log_grid_bins_subelement(root_element)
    self._create_dagmc_subelement(root_element)
    # Clean the indentation in the file to be user-readable
    clean_indentation(root_element)
    # Check if path is a directory
    p = Path(path)
    if p.is_dir():
        p /= 'settings.xml'
    # Write the XML Tree to the settings.xml file
    tree = ET.ElementTree(root_element)
    tree.write(str(p), xml_declaration=True, encoding='utf-8')
@classmethod
def from_xml(cls, path='settings.xml'):
    """Generate settings from XML file

    Parameters
    ----------
    path : str, optional
        Path to settings XML file

    Returns
    -------
    openmc.Settings
        Settings object
    """
    tree = ET.parse(path)
    root = tree.getroot()
    settings = cls()
    # Legacy files nest run parameters under <eigenvalue>; parse it first so
    # that the top-level elements parsed below overwrite its values when both
    # are present. Each _*_from_xml_element call is a no-op when the
    # corresponding element is absent.
    settings._eigenvalue_from_xml_element(root)
    settings._run_mode_from_xml_element(root)
    settings._particles_from_xml_element(root)
    settings._batches_from_xml_element(root)
    settings._inactive_from_xml_element(root)
    settings._max_lost_particles_from_xml_element(root)
    settings._rel_max_lost_particles_from_xml_element(root)
    settings._generations_per_batch_from_xml_element(root)
    settings._keff_trigger_from_xml_element(root)
    settings._source_from_xml_element(root)
    settings._output_from_xml_element(root)
    settings._statepoint_from_xml_element(root)
    settings._sourcepoint_from_xml_element(root)
    settings._confidence_intervals_from_xml_element(root)
    settings._electron_treatment_from_xml_element(root)
    settings._energy_mode_from_xml_element(root)
    settings._max_order_from_xml_element(root)
    settings._photon_transport_from_xml_element(root)
    settings._ptables_from_xml_element(root)
    settings._seed_from_xml_element(root)
    settings._survival_biasing_from_xml_element(root)
    settings._cutoff_from_xml_element(root)
    settings._entropy_mesh_from_xml_element(root)
    settings._trigger_from_xml_element(root)
    settings._no_reduce_from_xml_element(root)
    settings._verbosity_from_xml_element(root)
    settings._tabular_legendre_from_xml_element(root)
    settings._temperature_from_xml_element(root)
    settings._trace_from_xml_element(root)
    settings._track_from_xml_element(root)
    settings._ufs_mesh_from_xml_element(root)
    settings._resonance_scattering_from_xml_element(root)
    settings._create_fission_neutrons_from_xml_element(root)
    settings._delayed_photon_scaling_from_xml_element(root)
    settings._event_based_from_xml_element(root)
    settings._max_particles_in_flight_from_xml_element(root)
    settings._material_cell_offsets_from_xml_element(root)
    settings._log_grid_bins_from_xml_element(root)
    settings._dagmc_from_xml_element(root)
    # TODO: Get volume calculations
    return settings
| 39.833101
| 89
| 0.641276
|
794c980fbca3983f92f4c1ecc44bf1122316d515
| 6,279
|
py
|
Python
|
venus/solver/milp_solver.py
|
vas-group-imperial/venus2
|
c0fa7f095a0b3fdaff93fc5e7d948035fae6412a
|
[
"BSD-2-Clause"
] | null | null | null |
venus/solver/milp_solver.py
|
vas-group-imperial/venus2
|
c0fa7f095a0b3fdaff93fc5e7d948035fae6412a
|
[
"BSD-2-Clause"
] | null | null | null |
venus/solver/milp_solver.py
|
vas-group-imperial/venus2
|
c0fa7f095a0b3fdaff93fc5e7d948035fae6412a
|
[
"BSD-2-Clause"
] | null | null | null |
# ************
# File: milp_solver.py
# Top contributors (to current version):
# Panagiotis Kouvaros (panagiotis.kouvaros@gmail.com)
# This file is part of the Venus project.
# Copyright: 2019-2021 by the authors listed in the AUTHORS file in the
# top-level directory.
# License: BSD 2-Clause (see the file LICENSE in the top-level directory).
# Description: Solves a verification problem by tranlating it into MILP.
# ************
import itertools
from timeit import default_timer as timer

import numpy as np
from gurobipy import *

from venus.common.logger import get_logger
from venus.common.utils import ReluState
from venus.solver.dep_cuts import DepCuts
from venus.solver.ideal_formulation import IdealFormulation
from venus.solver.milp_encoder import MILPEncoder
from venus.solver.solve_report import SolveReport
from venus.solver.solve_result import SolveResult
from venus.split.split_strategy import SplitStrategy
class MILPSolver:
    """Solves a verification problem by translating it into a MILP program."""

    # Shared logger, initialised lazily on first construction.
    logger = None

    def __init__(self, prob, config, lp=False):
        """
        Arguments:
            prob:
                VerificationProblem.
            config:
                Configuration.
            lp:
                Whether to use linear relaxation.
        """
        # NOTE: all solver state is stored on the class (not the instance),
        # because the Gurobi callback is a static method and needs access to
        # it. Consequently only one MILPSolver can be active at a time.
        MILPSolver.prob = prob
        MILPSolver.config = config
        MILPSolver.status = SolveResult.UNDECIDED
        MILPSolver.lp = lp
        if MILPSolver.logger is None:
            MILPSolver.logger = get_logger(__name__, config.LOGGER.LOGFILE)

    def solve(self):
        """
        Builds and solves the MILP program of the verification problem.

        Returns:
            SolveReport
        """
        start = timer()
        # encode into milp; lp_encode() builds the linear relaxation instead
        # of the full mixed-integer encoding.
        me = MILPEncoder(MILPSolver.prob, MILPSolver.config)
        if MILPSolver.lp == True:
            gmodel = me.lp_encode()
        else:
            gmodel = me.encode()
        # Set gurobi parameters
        # NOTE(review): Gurobi's documented parameter names are 'OutputFlag'
        # and 'TimeLimit' (no underscores); confirm that the underscored
        # spellings below are accepted by the gurobipy version in use.
        gmodel.setParam('OUTPUT_FLAG', 1 if MILPSolver.config.SOLVER.PRINT_GUROBI_OUTPUT == True else 0)
        if MILPSolver.config.SOLVER.TIME_LIMIT != -1:
            gmodel.setParam('TIME_LIMIT', MILPSolver.config.SOLVER.TIME_LIMIT)
        if not MILPSolver.config.SOLVER.DEFAULT_CUTS:
            MILPSolver.disable_default_cuts(gmodel)
        gmodel.setParam('FeasibilityTol', MILPSolver.config.SOLVER.FEASIBILITY_TOL)
        # Cache the variable list; Gurobi callbacks may only query cached vars.
        gmodel._vars = gmodel.getVars()
        # set callback cuts
        MILPSolver.id_form = IdealFormulation(
            MILPSolver.prob,
            gmodel,
            MILPSolver.config
        )
        MILPSolver.dep_cuts = DepCuts(
            MILPSolver.prob,
            gmodel,
            MILPSolver.config
        )
        # Optimise; the callback is only attached for the full MILP encoding.
        if MILPSolver.config.SOLVER.callback_enabled() and MILPSolver.lp == False:
            gmodel.optimize(MILPSolver._callback)
        else:
            gmodel.optimize()
        runtime = timer() - start
        cex = None
        if MILPSolver.status == SolveResult.BRANCH_THRESHOLD:
            # The callback terminated the solve early to trigger splitting.
            result = SolveResult.BRANCH_THRESHOLD
        elif gmodel.status == GRB.OPTIMAL:
            # A feasible point violating the specification was found; read it
            # back into an input-shaped counter-example array.
            # NOTE(review): `itertools` is used here but is not imported in
            # this module unless `from gurobipy import *` re-exports it --
            # confirm (an explicit `import itertools` is the safe fix).
            cex_shape = MILPSolver.prob.spec.input_node.input_shape
            cex = np.zeros(cex_shape)
            for i in itertools.product(*[range(j) for j in cex_shape]):
                cex[i] = MILPSolver.prob.spec.input_node.out_vars[i].x
            result = SolveResult.UNSAFE
        elif gmodel.status == GRB.TIME_LIMIT:
            result = SolveResult.TIMEOUT
        elif gmodel.status == GRB.INTERRUPTED:
            result = SolveResult.INTERRUPTED
        elif gmodel.status == GRB.INFEASIBLE or gmodel.status == GRB.INF_OR_UNBD:
            # No input satisfies the negated specification => property holds.
            result = SolveResult.SAFE
        else:
            result = SolveResult.UNDECIDED
        MILPSolver.logger.info(
            'Verification problem {} solved, '
            'LP: {}, '
            'time: {:.2f}, '
            'result: {}.'
            .format(
                MILPSolver.prob.id,
                MILPSolver.lp,
                runtime,
                result.value))
        return SolveReport(result, runtime, cex)

    @staticmethod
    def _callback(model, where):
        """
        Gurobi callback function. Adds ideal/dependency cuts at MIP nodes and
        optionally monitors the node count to trigger splitting.
        """
        if where == GRB.Callback.MIPNODE:
            if model.cbGet(GRB.Callback.MIPNODE_STATUS) == GRB.Status.OPTIMAL:
                if MILPSolver.config.SOLVER.IDEAL_CUTS == True:
                    MILPSolver.id_form.add_cuts()
                if MILPSolver.config.SOLVER.dep_cuts_enabled():
                    MILPSolver.dep_cuts.add_cuts()
        elif MILPSolver.config.SOLVER.MONITOR_SPLIT == True and \
                MILPSolver.config.SPLITTER.SPLIT_STRATEGY != SplitStrategy.NONE and \
                where == GRB.Callback.MIP:
            MILPSolver.monitor_milp_nodes(model)

    @staticmethod
    def monitor_milp_nodes(model):
        """
        Monitors the number of MILP nodes solved. Terminates the MILP if the
        number exceeds the BRANCH_THRESHOLD.

        Arguments:
            model: Gurobi model.
        """
        nodecnt = model.cbGet(GRB.Callback.MIP_NODCNT)
        if nodecnt > MILPSolver.config.SOLVER.BRANCH_THRESHOLD:
            # Flag the early termination so solve() reports BRANCH_THRESHOLD.
            MILPSolver.status = SolveResult.BRANCH_THRESHOLD
            model.terminate()

    @staticmethod
    def disable_default_cuts(gmodel):
        """
        Disables Gurobi default cuts.

        Arguments:
            gmodel: Gurobi Model.

        Returns:
            None.
        """
        # PreCrush lets user cuts be translated onto the presolved model.
        gmodel.setParam('PreCrush', 1)
        gmodel.setParam(GRB.Param.CoverCuts,0)
        gmodel.setParam(GRB.Param.CliqueCuts,0)
        gmodel.setParam(GRB.Param.FlowCoverCuts,0)
        gmodel.setParam(GRB.Param.FlowPathCuts,0)
        gmodel.setParam(GRB.Param.GUBCoverCuts,0)
        gmodel.setParam(GRB.Param.ImpliedCuts,0)
        gmodel.setParam(GRB.Param.InfProofCuts,0)
        gmodel.setParam(GRB.Param.MIPSepCuts,0)
        gmodel.setParam(GRB.Param.MIRCuts,0)
        gmodel.setParam(GRB.Param.ModKCuts,0)
        gmodel.setParam(GRB.Param.NetworkCuts,0)
        gmodel.setParam(GRB.Param.ProjImpliedCuts,0)
        gmodel.setParam(GRB.Param.StrongCGCuts,0)
        gmodel.setParam(GRB.Param.SubMIPCuts,0)
        gmodel.setParam(GRB.Param.ZeroHalfCuts,0)
        gmodel.setParam(GRB.Param.GomoryPasses,0)
| 33.222222
| 105
| 0.620003
|
794c99ff77ff67a8d37d03b7193ae8a280cacc8e
| 1,187
|
py
|
Python
|
test/test_bill_of_materials_api.py
|
altoyield/python-beanieclient
|
448b8dd328054eaf32dd7d0bdff700e603b5c27d
|
[
"Apache-2.0"
] | null | null | null |
test/test_bill_of_materials_api.py
|
altoyield/python-beanieclient
|
448b8dd328054eaf32dd7d0bdff700e603b5c27d
|
[
"Apache-2.0"
] | null | null | null |
test/test_bill_of_materials_api.py
|
altoyield/python-beanieclient
|
448b8dd328054eaf32dd7d0bdff700e603b5c27d
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Beanie ERP API
An API specification for interacting with the Beanie ERP system # noqa: E501
OpenAPI spec version: 0.2
Contact: dev@bean.ie
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import beanie
from beanie.api.bill_of_materials_api import BillOfMaterialsApi # noqa: E501
from beanie.rest import ApiException
class TestBillOfMaterialsApi(unittest.TestCase):
    """BillOfMaterialsApi unit test stubs"""
    # Generated scaffolding: each test_* method is a placeholder awaiting
    # real request/response assertions against the Beanie API.

    def setUp(self):
        # A fresh API client is constructed for every test case.
        self.api = beanie.api.bill_of_materials_api.BillOfMaterialsApi()  # noqa: E501

    def tearDown(self):
        # No per-test resources to release.
        pass

    def test_add_bill_of_material(self):
        """Test case for add_bill_of_material

        """
        pass

    def test_find_bill_of_material_by_id(self):
        """Test case for find_bill_of_material_by_id

        Find Bill of Materials by ID  # noqa: E501
        """
        pass

    def test_find_bill_of_materials(self):
        """Test case for find_bill_of_materials

        All bill of materials  # noqa: E501
        """
        pass
# Allow the test module to be executed directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| 21.581818
| 86
| 0.67818
|
794c9a081f9669dda491a9d0b80149a21d11f873
| 2,529
|
py
|
Python
|
tests/query_test/test_tpch_queries.py
|
henryr/minimised-impala
|
264d20a4f02ece6ee94e96bc49db2825b0bb1548
|
[
"Apache-2.0"
] | null | null | null |
tests/query_test/test_tpch_queries.py
|
henryr/minimised-impala
|
264d20a4f02ece6ee94e96bc49db2825b0bb1548
|
[
"Apache-2.0"
] | null | null | null |
tests/query_test/test_tpch_queries.py
|
henryr/minimised-impala
|
264d20a4f02ece6ee94e96bc49db2825b0bb1548
|
[
"Apache-2.0"
] | 2
|
2019-09-22T07:59:28.000Z
|
2021-02-25T21:56:07.000Z
|
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
# Functional tests running the TPCH workload.
#
import logging
import pytest
from tests.common.test_vector import *
from tests.common.impala_test_suite import *
class TestTpchQuery(ImpalaTestSuite):
    """Functional tests running the 22 TPC-H queries, one test per query."""

    @classmethod
    def get_workload(cls):
        # Fix: first parameter of a classmethod is conventionally named
        # `cls` (matches add_test_dimensions below); behavior is unchanged.
        return 'tpch'

    @classmethod
    def add_test_dimensions(cls):
        """Restrict the expensive TPC-H matrix to a single exec-option
        dimension and, under core exploration, to the text file format."""
        super(TestTpchQuery, cls).add_test_dimensions()
        cls.TestMatrix.add_dimension(create_single_exec_option_dimension())
        # The tpch tests take a long time to execute so restrict the combinations they
        # execute over
        # TODO: the planner tests are based on text and need this.
        if cls.exploration_strategy() == 'core':
            cls.TestMatrix.add_constraint(lambda v:\
                v.get_value('table_format').file_format in ['text'])

    def test_tpch_q1(self, vector):
        self.run_test_case('tpch-q1', vector)

    def test_tpch_q2(self, vector):
        self.run_test_case('tpch-q2', vector)

    def test_tpch_q3(self, vector):
        self.run_test_case('tpch-q3', vector)

    def test_tpch_q4(self, vector):
        self.run_test_case('tpch-q4', vector)

    def test_tpch_q5(self, vector):
        self.run_test_case('tpch-q5', vector)

    def test_tpch_q6(self, vector):
        self.run_test_case('tpch-q6', vector)

    def test_tpch_q7(self, vector):
        self.run_test_case('tpch-q7', vector)

    def test_tpch_q8(self, vector):
        self.run_test_case('tpch-q8', vector)

    def test_tpch_q9(self, vector):
        self.run_test_case('tpch-q9', vector)

    def test_tpch_q10(self, vector):
        self.run_test_case('tpch-q10', vector)

    def test_tpch_q11(self, vector):
        self.run_test_case('tpch-q11', vector)

    def test_tpch_q12(self, vector):
        self.run_test_case('tpch-q12', vector)

    def test_tpch_q13(self, vector):
        self.run_test_case('tpch-q13', vector)

    def test_tpch_q14(self, vector):
        self.run_test_case('tpch-q14', vector)

    def test_tpch_q15(self, vector):
        self.run_test_case('tpch-q15', vector)

    def test_tpch_q16(self, vector):
        self.run_test_case('tpch-q16', vector)

    def test_tpch_q17(self, vector):
        self.run_test_case('tpch-q17', vector)

    def test_tpch_q18(self, vector):
        self.run_test_case('tpch-q18', vector)

    def test_tpch_q19(self, vector):
        self.run_test_case('tpch-q19', vector)

    def test_tpch_q20(self, vector):
        self.run_test_case('tpch-q20', vector)

    def test_tpch_q21(self, vector):
        self.run_test_case('tpch-q21', vector)

    def test_tpch_q22(self, vector):
        self.run_test_case('tpch-q22', vector)
| 27.791209
| 82
| 0.718861
|
794c9a28ecfe104824f399e809595264758666f7
| 2,351
|
py
|
Python
|
DQMOffline/Muon/python/muonQualityTests_cff.py
|
gputtley/cmssw
|
c1ef8454804e4ebea8b65f59c4a952a6c94fde3b
|
[
"Apache-2.0"
] | 2
|
2020-01-27T15:21:37.000Z
|
2020-05-11T11:13:18.000Z
|
DQMOffline/Muon/python/muonQualityTests_cff.py
|
gputtley/cmssw
|
c1ef8454804e4ebea8b65f59c4a952a6c94fde3b
|
[
"Apache-2.0"
] | 26
|
2018-10-30T12:47:58.000Z
|
2022-03-29T08:39:00.000Z
|
DQMOffline/Muon/python/muonQualityTests_cff.py
|
gputtley/cmssw
|
c1ef8454804e4ebea8b65f59c4a952a6c94fde3b
|
[
"Apache-2.0"
] | 3
|
2019-03-09T13:06:43.000Z
|
2020-07-03T00:47:30.000Z
|
import FWCore.ParameterSet.Config as cms
# the clients
from DQM.TrackingMonitor.ClientTrackEfficiencySTACosmicMuons_cff import *
from DQM.TrackingMonitor.ClientTrackEfficiencyTkTracks_cff import *
from DQMOffline.Muon.trackResidualsTest_cfi import *
from DQMOffline.Muon.muonRecoTest_cfi import *
from DQMOffline.Muon.muonTestSummary_cfi import *
from DQMOffline.Muon.muonTestSummaryCosmics_cfi import *
from DQMOffline.Muon.EfficencyPlotter_cfi import *
from DQMOffline.Muon.TriggerMatchEfficencyPlotter_cfi import *
from DQMServices.Core.DQMQualityTester import DQMQualityTester
# First-pass quality tests, applied to the raw muon DQM source histograms.
muonSourcesQualityTests = DQMQualityTester(
    prescaleFactor = cms.untracked.int32(1),
    qtList = cms.untracked.FileInPath('DQMOffline/Muon/data/QualityTests1.xml')
)

# Second-pass quality tests, applied to the client-produced histograms.
muonClientsQualityTests = DQMQualityTester(
    prescaleFactor = cms.untracked.int32(1),
    qtList = cms.untracked.FileInPath('DQMOffline/Muon/data/QualityTests2.xml')
)

# Quality-test sequence for cosmic-muon runs.
cosmicMuonQualityTests = cms.Sequence(ClientTrackEfficiencyTkTracks*
                                      ClientTrackEfficiencySTACosmicMuons*
                                      muonSourcesQualityTests*
                                      muTrackResidualsTest*
                                      muRecoTest*
                                      muonClientsQualityTests*
                                      muonCosmicTestSummary)

# Standard quality-test sequence for collision data (AOD).
muonQualityTests = cms.Sequence(muonSourcesQualityTests*
                                muTrackResidualsTest*
                                effPlotterLoose*
                                effPlotterMedium*
                                effPlotterTight*
                                muRecoTest*
                                muonClientsQualityTests*
                                muonTestSummary)

# MiniAOD variant: uses the MiniAOD efficiency plotters and adds the
# trigger-match efficiency plotter.
muonQualityTests_miniAOD = cms.Sequence(muonSourcesQualityTests*
                                        muTrackResidualsTest*
                                        effPlotterLooseMiniAOD*
                                        effPlotterMediumMiniAOD*
                                        effPlotterTightMiniAOD*
                                        muRecoTest*
                                        muonClientsQualityTests*
                                        muonTestSummary*
                                        triggerMatchEffPlotterTightMiniAOD)
| 45.211538
| 79
| 0.578477
|
794c9c120d7c188b74b14699a5022a739d98cbca
| 17,228
|
py
|
Python
|
visual/plot_imp_matrix.py
|
BuysDB/siCloneFitIO
|
b5b1ff320d13e8fe1062a1ed5d55ab161daa9644
|
[
"MIT"
] | 2
|
2019-11-27T20:26:55.000Z
|
2022-02-16T11:15:55.000Z
|
visual/plot_imp_matrix.py
|
BuysDB/siCloneFitIO
|
b5b1ff320d13e8fe1062a1ed5d55ab161daa9644
|
[
"MIT"
] | null | null | null |
visual/plot_imp_matrix.py
|
BuysDB/siCloneFitIO
|
b5b1ff320d13e8fe1062a1ed5d55ab161daa9644
|
[
"MIT"
] | 1
|
2020-06-04T13:54:28.000Z
|
2020-06-04T13:54:28.000Z
|
#!/usr/bin/env python3
# Silence all library warnings globally for batch plotting runs.
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib
import seaborn as sns
import os
import sparsebinarydistance.distance as distance
import traceback
# Render all figures at print quality by default.
matplotlib.rcParams['figure.dpi']= 300
def select_subset(snvmatrix, n=0):
    """Return the rows of *snvmatrix* with at least *n* measured entries.

    An entry is considered measured when its value is >= 0 (negative values
    encode missing observations). With the default ``n=0`` every row is kept.
    """
    measured_per_row = (snvmatrix >= 0).sum(axis=1)
    return snvmatrix.loc[measured_per_row >= n]
def gen_vis_matrix(raw_matrix, imputed_matrix, transparency = 0.45):
    '''
    Build a graded matrix for plotting that distinguishes measured from
    imputed genotype calls.

    raw values -> 0,1
    imputed values -> transparency, 1 - transparency (0.45/0.55 by default)
    return post_impute_matrix: a matrix with graded values representing the
    evidence of values (can be even more! different round of imputation!)

    Prints diagnostic counts as a side effect.
    '''
    # check if values only contains 0 and 1.
    print("raw == 1: " ,(raw_matrix == 1).sum().sum())
    print("raw == 0: " ,(raw_matrix == 0).sum().sum())
    # 2. enhance the transparency of the imputed values: map hard 0/1 calls
    # to intermediate intensities so they plot fainter than measured calls.
    transparent_matrix = imputed_matrix.copy()
    transparent_matrix[(transparent_matrix == 0)] = transparency
    transparent_matrix[(transparent_matrix == 1)] = ( 1 -transparency)
    print("imputed == 0.55: ", (transparent_matrix == 1- transparency).sum().sum())
    print("imputed == 0.45: ", (transparent_matrix == transparency).sum().sum())
    # 3. combine the measured values and imputed values in a graded matrix.
    # Select index from the imputed cells from the raw matrix so both
    # matrices cover the same cells before combining.
    raw_matrix = raw_matrix.loc[list(imputed_matrix.index)]
    print('shape of original and imputed matrix are the same:', raw_matrix.shape == transparent_matrix.shape)
    ### if the shape is not the same, doesn't update.
    # DataFrame.update overwrites the faded imputed values with the original
    # measured 0/1 values wherever the raw matrix is non-NaN.
    combined_matrix = transparent_matrix.copy()
    combined_matrix.update(raw_matrix)
    # print("combined total 1 equals to original raw:",
    #       (raw_matrix == 1).sum().sum() == (combined_matrix == 1).sum().sum())
    # since siclonefit flip erronous observation, the following test is "False"?
    # print("combined 0.55 and 1 equals to imputed 1:", (transparent_matrix == 1 - transparency).sum().sum() == (
    #         (combined_matrix == 1).sum().sum() + (combined_matrix == 1 - transparency).sum().sum()))
    return combined_matrix
class CellSnvPlotMatrix():
def __init__(self, raw_matrix, imputed_matrix, minPresence, minMeasurementsPerCell,
outpath, filename_prefix, copyNumberClone = None, replicate = None, transparency = 0.3, svg=False):
self.raw_matrix = raw_matrix
self.imputed_matrix = imputed_matrix
self.sanity_input_matrix()
self.transparency = transparency
self.svg = svg
self.minPresence = minPresence
self.minMeasurementsPerCell = minMeasurementsPerCell
self.replicate = replicate
self.copyNumberClone = copyNumberClone
self.clusterNumberGroup_mapping = None
self.clusterNumber = None
self.cnv_group_name = None
self.gen_CNVGroup() # create copyNumberClone, clusterNumberGroup_mapping
self.outpath = outpath
self.filename_prefix = filename_prefix
# form by createClusterGroupsColor() <- called in gen_mapping_color()
self.clusterGroupColors = None
# form by cal_sparse_distance()
self.keptsSNVs = None
self.keptsSNVs_for_plotting = None
self.jointMatrixCluster = None
self.snv_distance_matrix = None
# form by plot_snv_snv() <- after cal_sparse_distance()
self.snv_order = None
# form by gen_mapping_color() <- after plot_snv_snv()
self.jm = None
self.colors = None
self.mapping = None
self.cell_order = None
self.cal_sparse_distance()
self.plot_snv_snv()
self.gen_mapping_color()
self.plot_cell_cell()
def gen_CNVGroup(self):
# 0 :Chr18- (Complete loss, no Chr4 loss)
# 1: Chr18- + Chr4A -,
# 2: Chr18-, Chr 4B-
# 3: Chr8- (No Chr18- no chr4-)
# 4: Chr9+ (No Chr18- no chr4- no chr8-)
# 4: Others
# 2020/04/13: new group total 52 states
clusterNumberGroup = {
0: [2, 9, 15, 17, 28, 29, 42, 47, 52], # chr18- (Whole chr18 loss)
1: [3, 13, 27, 34, 40, 41, 43], # chr18- chr4A
2: [4, 22, 36, 46], # chr18- chr4B
3: [1, 8, 16, 18, 19, 23, 30, 32, 35, 44, 49, 50, 51], # chr8- (or chr8+)
4: [5, 6, 7, 10, 12, 20, 25, 26, 31, 38, 39, 45], # chr9+ (also don't have chr8+ or chr18- nor chr4-)
5: [11, 14, 21, 24, 33, 37, 48] # others
}
# create mapping
clusterNumberGroup_mapping = {}
for k, v in clusterNumberGroup.items():
for i in v:
clusterNumberGroup_mapping[i] = k
self.copyNumberClone['CNV Group'] = self.copyNumberClone['state'].map(clusterNumberGroup_mapping)
# get only two columns I need and rewrite!
self.copyNumberClone = self.copyNumberClone[['CNV Group','state']]
self.clusterNumberGroup_mapping = clusterNumberGroup_mapping
self.cnv_group_name = ["chr18(-)", "chr4A(-)", "chr4B(-)", "chr8(-)", "chr9(+)", "others"]
def cal_sparse_distance(self, calculate_imputed_distance = False):
if calculate_imputed_distance is False:
cal_matrix = self.raw_matrix
else:
cal_matrix = self.imputed_matrix
try:
self.keptsSNVs, self.jointMatrixCluster, cc, dd, ee = distance.sparseDistance(
cal_matrix,
minPresence=self.minPresence,
minMeasurementsPerCell=self.minMeasurementsPerCell)
self.keptsSNVs.to_pickle(f"{self.outpath}/{self.replicate}_keptsSNVs.pickle")
self.keptsSNVs_for_plotting = gen_vis_matrix(self.keptsSNVs, self.imputed_matrix, transparency= self.transparency)
self.keptsSNVs_for_plotting = self.keptsSNVs_for_plotting.loc[self.keptsSNVs.index, self.keptsSNVs.columns ]
a, self.snv_distance_matrix, c, d, e = distance.sparseDistance(
self.keptsSNVs.T,
minPresence=1,
minMeasurementsPerCell=1)
except ValueError:
traceback.print_exc()
print(f"{self.keptsSNVs.shape}. Module sparseDistance broke.")
def gen_mapping_color(self):
'''
TODO: select what categories to add, these labels should not be included in the sparse distance calculation. But should be include in plot table
:return:
'''
# self.keptsSNVs_for_plotting.index.name = None
# self.copyNumberClone = self.copyNumberClone.loc[list(self.keptsSNVs_for_plotting.index)].reindex(self.keptsSNVs_for_plotting.index)
# pd.to_pickle(self.keptsSNVs_for_plotting, "/Users/Alice/Desktop/keptsSNVs_for_plotting.pickle")
# pd.to_pickle(self.copyNumberClone, "/Users/Alice/Desktop/copyNumberClone.pickle")
# BUG: if sample names not the same, break. Now: left : with P, right: without P.
self.jm = self.keptsSNVs_for_plotting.join(self.copyNumberClone)
self.jm['replicate'] = self.jm.index.get_level_values(0)
self.createClusterGroupsColor()
self.colors, self.mapping = self.createRowColorDataFrame(nanColor=(1, 1, 1),
predeterminedColorMapping=self.clusterGroupColors)
self.plot_colorbar()
def plot_snv_snv(self):
'''
:param minPresence:
:param minMeasurementsPerCell:
:return: figure saved
'''
cm_rows = sns.clustermap(self.snv_distance_matrix,
method='ward',
cmap='PiYG_r') #distance is defined in snv_distance_matrix
cm_rows.ax_heatmap.set_xticks([])
cm_rows.ax_heatmap.set_xlabel(f"{len(self.snv_distance_matrix.columns)} sSNVs")
cm_rows.ax_heatmap.set_ylabel(f"{len(self.snv_distance_matrix.columns)} sSNVs")
cm_rows.savefig(f"{self.outpath}/{self.replicate}_{self.filename_prefix}_mm{self.minMeasurementsPerCell}_"
f"mp{self.minPresence}_fig0_snv_snv_distance.png", dpi=300)
# retrieve snv order for later use
self.snv_order = cm_rows.dendrogram_row.linkage
def plot_cell_cell(self, shift_label=False):
if type(self.snv_order) == type(None):
# plot_snv_snv has not yet been done.
self.plot_snv_snv()
self.gen_mapping_color()
self._plot_cell_cell(shift_label=shift_label)
def _plot_cell_cell(self, shift_label=False):
cm = sns.clustermap( self.jointMatrixCluster,
row_colors=self.colors,
row_cluster=True,
cmap='PiYG_r', vmin=0, vmax=1, method='ward') #
cm.ax_heatmap.set_xticks([])
cm.ax_heatmap.set_yticks([])
cm.ax_heatmap.set_xlabel(f'{self.jointMatrixCluster.shape[0]} single cells')
cm.ax_heatmap.set_ylabel(f'{self.jointMatrixCluster.shape[0]} single cells')
if shift_label == True:
self.addpatch(cm)
cm.savefig(f'{self.outpath}/{self.replicate}_{self.filename_prefix}_mm{self.minMeasurementsPerCell}_'
f'mp{self.minPresence}fig1_cell_cell.png', dpi=300)
self.cell_order = cm.dendrogram_row.linkage
def plot_snv_cell(self, sorted = True):
if type(self.snv_order) == type(None):
self.plot_snv_snv() # get self.snv_order
self.plot_cell_cell() # get self.cell_order
self.gen_mapping_color() # get self.jm, self.mapping, self.colors
self._plot_snv_cell(sorted=sorted)
def _plot_snv_cell(self, sorted=True):
    """Draw the cell-by-sSNV heatmap; needs self.jm and self.colors.

    :param sorted: when True, order cells by CNV group / replicate / state
        instead of clustering the columns.
    """
    if sorted != True:
        # cluster the columns (cells); linkage arguments intentionally
        # left at seaborn defaults here
        cg = sns.clustermap(self.keptsSNVs_for_plotting.fillna(0.5).T,
                            cmap='bwr',
                            xticklabels=False,
                            yticklabels=1,
                            col_colors=self.colors,
                            method='ward')
        cg.ax_heatmap.set_xlabel(f"{len(self.keptsSNVs.index)} single cells")
        cg.ax_heatmap.set_ylabel(f"{len(self.keptsSNVs.columns)} sSNV positions")
        cg.savefig(f'{self.outpath}/{self.replicate}_{self.filename_prefix}_mm{self.minMeasurementsPerCell}_'
                   f'mp{self.minPresence}_fig2_cell_snv.png', dpi=300)
        return
    # sort cells by CNV group, then replicate, then state, and keep only
    # the cells that actually have a CNV measurement
    by_group = self.jm.sort_values(by=['CNV Group', 'replicate', 'state'],
                                   ascending=[True, True, True])
    by_group = by_group[by_group['CNV Group'].notnull()]
    self._plot_sorted(by_group.drop(columns=['CNV Group', 'replicate', 'state']))
def _plot_sorted(self, sorted_matrix):
    """Save the pre-sorted cell-by-sSNV heatmap (columns kept in the given
    order; rows clustered with Ward linkage)."""
    fig = sns.clustermap(sorted_matrix.fillna(0.5).T,
                         cmap='bwr',
                         yticklabels=False,
                         xticklabels=False,
                         col_cluster=False,
                         col_colors=self.colors,
                         method='ward')
    fig.ax_heatmap.set_xlabel(f"{len(self.keptsSNVs.index)} single cells")
    fig.ax_heatmap.set_ylabel(f"{len(self.keptsSNVs.columns)} sSNV positions")
    fig.savefig(f"{self.outpath}/{self.replicate}_{self.filename_prefix}__"
                f"mm{self.minMeasurementsPerCell}_mp{self.minPresence}_fig3_cell_snv_sorted.png", dpi=300)
    if self.svg == True:
        # optionally also emit a vector version
        fig.savefig(f"{self.outpath}/{self.replicate}_{self.filename_prefix}_"
                    f"mm{self.minMeasurementsPerCell}_mp{self.minPresence}_fig3_cell_snv_sorted.svg")
def plot_colorbar(self):
    """Save one horizontal color-legend image per annotation track in self.mapping."""
    # enumerate replaces the original hand-rolled `count` variable
    for count, name in enumerate(self.mapping, start=1):
        lut = []
        lutKeys = []
        for key in sorted([x for x in self.mapping[name].keys() if not pd.isnull(x)]):
            lut.append(self.mapping[name][key])
            lutKeys.append(key)
        sns.palplot(sns.color_palette(lut))
        locs, labels = plt.xticks()
        # if plotting CNV group state color, plotting integer is not informative;
        # substitute the human-readable cnv_group_name labels.
        # isinstance replaces type(...) == int; the emptiness guard avoids an
        # IndexError when a track has no non-null keys.
        if lutKeys and isinstance(lutKeys[0], int):
            lutKeys = self.cnv_group_name
        plt.xticks(locs + 0.5, lutKeys, fontsize='12')
        plt.savefig(f"{self.outpath}/{self.replicate}_{self.filename_prefix}_cmap{count}_{name}.png", bbox_inches='tight')
        # close the palplot figure so figures don't accumulate across tracks
        plt.close()
def createClusterGroupsColor(self):
    """Assign a fixed RGB color to each of the five CNV cluster groups and
    record the group count in self.clusterNumber."""
    palette = [(1.0, 0.8667, 0.0),
               (1.0, 0.6471, 0.0),
               (0.6157, 0.9373, 0.2235),
               (0.2588, 0.8196, 0.9569),
               (0.2549, 0.4549, 0.9569)]
    self.clusterNumber = len(palette)
    # group index (0..4) -> RGB tuple
    self.clusterGroupColors = {group: rgb for group, rgb in enumerate(palette)}
def createRowColorDataFrame(self, nanColor=(0, 0, 0), predeterminedColorMapping=None):
    """Build a per-cell color annotation frame for the 'CNV Group' and
    'replicate' columns of self.jm.

    :param nanColor: RGB tuple used for missing (NaN) values
    :param predeterminedColorMapping: optional {state: RGB} presets; states
        without a preset get colors from the 'hls' palette
    :return: (discreteColorMatrix, luts) — the color DataFrame (cells x
        columns) and a {column: {state: RGB}} lookup per column
    """
    # BUG FIX: the original used a mutable default argument ({}); use None
    # as the sentinel instead.
    if predeterminedColorMapping is None:
        predeterminedColorMapping = {}
    # Should look like:
    # discreteStatesDataFrame = pd.DataFrame( [['A','x'],['A','y']],index=['A','B'], columns=['First', 'Second'] )
    colorMatrix = []
    luts = {}
    discreteStatesDataFrame = self.jm[['CNV Group', 'replicate']]
    for column in discreteStatesDataFrame:
        states = [x for x in discreteStatesDataFrame[column].unique() if not pd.isnull(x)]
        undeterminedColorStates = [x for x in states if x not in predeterminedColorMapping]
        cols = sns.color_palette('hls', len(undeterminedColorStates))
        lut = {state: cols[i] for i, state in enumerate(undeterminedColorStates)}
        lut.update({key: value for key, value in predeterminedColorMapping.items() if key in states})
        lut[np.nan] = nanColor
        colorMatrix.append([nanColor if pd.isnull(x) else lut[x] for x in discreteStatesDataFrame[column]])
        luts[column] = lut
    discreteColorMatrix = pd.DataFrame(colorMatrix, index=discreteStatesDataFrame.columns,
                                       columns=discreteStatesDataFrame.index).transpose()
    return discreteColorMatrix, luts
def addpatch(self, cm):
    """Redraw the CNV-group row-color strip of a clustermap as small patches.

    For every cell (in dendrogram display order) the original row color is
    first masked with a white rectangle; cells with a known CNV group then
    get a smaller colored patch whose vertical offset encodes the group index.

    :param cm: a seaborn ClusterGrid whose row_colors / ax_row_colors to edit
    """
    clusterRowIndex = 0  # the strip occupies the first row-color track
    # iterate the cells in the order the dendrogram displays them
    for i, c in enumerate(np.array(cm.row_colors[0])[cm.dendrogram_row.reordered_ind]):
        cluster = self.jm['CNV Group'].iloc[cm.dendrogram_row.reordered_ind[i]]
        # add a white rectangular to mask the original row_color
        rect = patches.Rectangle(
            (i, clusterRowIndex),
            1, 4, fill=True, facecolor='w', edgecolor='blue', lw=0)
        cm.ax_row_colors.add_patch(rect)
        if np.isnan(cluster):
            # no CNV measurement for this cell: leave only the white mask
            continue
        # vertical slot of this group within a (clusterNumber + 1)-way split
        xStep = (1 / (self.clusterNumber + 1))
        startX = xStep * self.clusterNumberGroup_mapping.get(cluster, 1)
        rect = patches.Rectangle(
            (i, clusterRowIndex + startX),
            1, xStep * 4, fill=True, facecolor=c, edgecolor='k', lw=0)
        cm.ax_row_colors.add_patch(rect)
def sanity_input_matrix(self):
    """Normalize the raw and imputed genotype matrices in place:
    -1 becomes NaN, values above 0.5 become 1, values below 0.5 become 0.
    (NaN survives the threshold steps because NaN comparisons are False.)
    """
    for matrix in (self.raw_matrix, self.imputed_matrix):
        matrix[matrix == -1] = np.nan
        matrix[matrix > 0.5] = 1
        matrix[matrix < 0.5] = 0
def check_indexname(pickle_files):
    """Reset the index level names of each DataFrame to [None] * 4.

    :param pickle_files: a list of DataFrames (loaded from pickle files),
        each with a 4-level index
    :return: a new list with every frame's index names cleared
    """
    normalized = []
    for frame in pickle_files:
        # BUG FIX: the original tested `is not [None, None, None, None]`,
        # which is an identity comparison against a fresh list literal and is
        # therefore always True; compare by value instead.
        if list(frame.index.names) != [None, None, None, None]:
            frame.index.names = [None, None, None, None]
        normalized.append(frame)
    return normalized
| 45.57672
| 152
| 0.609589
|
794c9d2a981a50b275fcd25311d5b7fdedb403de
| 534
|
py
|
Python
|
telegram_chat/bot.py
|
joao-coimbra/easy-python-telegram
|
ffbf8959bff83e9c5a712bc49c8a269faa28f047
|
[
"MIT"
] | null | null | null |
telegram_chat/bot.py
|
joao-coimbra/easy-python-telegram
|
ffbf8959bff83e9c5a712bc49c8a269faa28f047
|
[
"MIT"
] | null | null | null |
telegram_chat/bot.py
|
joao-coimbra/easy-python-telegram
|
ffbf8959bff83e9c5a712bc49c8a269faa28f047
|
[
"MIT"
] | null | null | null |
import telegram
import logging
import os
from telegram_chat.keys import *
# Configure root logging with a timestamped format for all bot activity.
logging.basicConfig(
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    level=logging.INFO
)

# Module-wide bot instance; TELEGRAM_TOKEN is provided by telegram_chat.keys.
bot = telegram.Bot(
    token=TELEGRAM_TOKEN
)
def sendMessage(text):
    """Send a plain-text message to the chat configured via CHAT_ID."""
    bot.sendMessage(chat_id=CHAT_ID, text=text)
def sendImage(img, local=True):
    """Send a photo to the chat configured via CHAT_ID.

    :param img: relative path to a local image (when local=True), or a
        URL / Telegram file id (when local=False)
    :param local: if True, read the image from the current working directory
    """
    if local:
        # BUG FIX: build the path portably (the original hard-coded a Windows
        # backslash separator) and close the file handle after sending.
        with open(os.path.join(os.getcwd(), img), 'rb') as photo:
            bot.sendPhoto(
                chat_id=CHAT_ID,
                photo=photo
            )
    else:
        bot.sendPhoto(
            chat_id=CHAT_ID,
            photo=img
        )
| 18.413793
| 64
| 0.632959
|
794c9dcc5dc1e71d3d49b243901a73c349ca855e
| 6,037
|
py
|
Python
|
train.py
|
jsheng7/DeepCovidXR
|
7bf794d8225bcfa2adff96676013663453077a71
|
[
"MIT"
] | null | null | null |
train.py
|
jsheng7/DeepCovidXR
|
7bf794d8225bcfa2adff96676013663453077a71
|
[
"MIT"
] | null | null | null |
train.py
|
jsheng7/DeepCovidXR
|
7bf794d8225bcfa2adff96676013663453077a71
|
[
"MIT"
] | null | null | null |
# Train an individual model
import argparse
import os
from utils import imgUtils, trainFeatures
import pickle
def get_args():
    """Parse the command-line arguments for training a single model.

    :return: an argparse.Namespace with model_name, img_size, path, output,
        weight_path and hyperparameters attributes
    """
    p = argparse.ArgumentParser(description='Train a model on a given dataset.')
    model_choices = ['ResNet-50', 'Xception', 'DenseNet-121', 'Inception-V3',
                     'Inception-ResNet-V2', 'EfficientNet-B2']
    p.add_argument('-m', '--model', dest='model_name', metavar='model_name',
                   choices=model_choices, type=str, required=True,
                   help='the name of the model to be trained.\n Choose from ResNet-50, Xception, DenseNet-121, Inception-V3,'
                        'Inception-ResNet-V2, EfficientNet-B2')
    p.add_argument('--size', '-s', dest='img_size', metavar='img_size',
                   type=int, required=True,
                   help='the size of dataset images')
    p.add_argument('--path', '-p', dest='path', metavar='DATA_path', type=str,
                   required=True, help='the path that contains the dataset.')
    p.add_argument('--output', '-o', dest='output', metavar='prediction_output_path', type=str,
                   default=None, required=True,
                   help='the directory to output training curves and saved weights')
    p.add_argument('--weight_path', '-w', dest='weight_path', metavar='weight_path', type=str,
                   required=True,
                   help='the path to pretrained weights, either NIH if training from scratch or '
                        'corresponding model weights from our pretrained weights if fine-tuning'
                        ' DeepCOVID-XR.')
    p.add_argument('--hyperparameters', '-hy', dest='hyperparameters', metavar='Hyperparameters', type=str,
                   required=False, default=None,
                   help='the path to pickled hyperparameters dictionary; will use '
                        'default parameters if not provided.')
    return p.parse_args()
def make_path(data_dir, base, exp_name):
    """Resolve dataset paths and create the output directory tree.

    Exits the process if data_dir lacks 'Train' / 'Validation' subdirectories.

    :param data_dir: dataset root containing 'Train' and 'Validation'
    :param base: output root for saved weights and plots
    :param exp_name: experiment tag used in file and directory names
    :return: (train_path, valid_path, freeze_weight_path, unfreeze_weight_path,
              freeze_img_path, unfreeze_img_path)
    """
    train_path = os.path.join(data_dir, 'Train')
    valid_path = os.path.join(data_dir, 'Validation')
    if (not os.path.isdir(train_path)) or (not os.path.isdir(valid_path)):
        print('Please split images into train directory and validation directory.')
        exit()

    freeze_weight_save_path = os.path.join(base, 'save_weights_initial', exp_name + '.h5')
    unfreeze_weight_save_path = os.path.join(base, 'save_weights_final', exp_name + '.h5')
    freeze_img_save_path = os.path.join(base, 'save_plots_initial', exp_name)
    unfreeze_img_save_path = os.path.join(base, 'save_plots_final', exp_name)

    # os.makedirs(..., exist_ok=True) replaces the original's six
    # check-then-create pairs and removes the race between check and mkdir;
    # creating the plot subdirs also creates their parents.
    for directory in (os.path.join(base, 'save_weights_initial'),
                      os.path.join(base, 'save_weights_final'),
                      freeze_img_save_path,
                      unfreeze_img_save_path):
        os.makedirs(directory, exist_ok=True)

    return train_path, valid_path, freeze_weight_save_path, unfreeze_weight_save_path, freeze_img_save_path, unfreeze_img_save_path
if __name__=='__main__':
    # Fixed augmentation / batching settings for both training stages.
    batch_size = 16
    rotation_range = 15
    height_shift = 0.05
    width_shift = 0.05

    args = get_args()
    data_path = os.path.normpath(args.path)
    model_name = args.model_name
    img_size = args.img_size
    weights = os.path.normpath(args.weight_path)
    hyperparameters = args.hyperparameters
    exp_name = model_name + '_' + str(img_size)
    output_path = args.output
    # NOTE(review): --output is declared required=True, so output_path should
    # never be None here; the else branch looks unreachable — confirm intent.
    if output_path is not None:
        output_path = os.path.normpath(output_path)
    else:
        output_path = os.getcwd()
    train_dir, valid_dir, weight_dir1, weight_dir2, img_dir1, img_dir2 = make_path(data_path, output_path, exp_name)
    img_proc = imgUtils(img_size)
    train_idg, val_idg = img_proc.dataGen(rotation_range, height_shift, width_shift)
    train_gen, val_gen = img_proc.generator(batch_size, train_idg, val_idg, train_dir, valid_dir)
    # Load tuned hyperparameters from the pickle if provided, else defaults.
    if hyperparameters is not None:
        hyperparameters = pickle.load(open(hyperparameters, "rb"))
        lr = hyperparameters['learning_rate']
        momentum = hyperparameters['momentum']
        dropout_rate = hyperparameters['dropout_rate']
    else:
        lr = 0.001
        momentum = 0.9
        dropout_rate = 0.3
    # Optimizer and callback settings shared by both stages.
    nesterov = True
    patience_rlr = 3
    patience_es = 5
    factor = 0.1
    min_delta = 0.001
    monitor = 'val_auc'
    pre_epoch = 50
    epoch = 50

    # Stage 1: train with the backbone frozen (only the new head learns).
    features = trainFeatures()
    rlr = features.setRLP(monitor, factor, patience_rlr)
    es = features.setES(monitor, patience_es, min_delta)
    cp = features.setCP(monitor, weight_dir1)
    dropout_model = features.getDropoutModel(model_name, img_size, weights, dropout_rate)
    features.compileModel(dropout_model, lr, momentum, nesterov)
    model_history = features.generator(dropout_model, train_gen, val_gen, pre_epoch, cp, rlr, es)
    img_proc.plot_save(model_history, img_dir1)

    # Unfreeze and train the entire model
    model = features.load(dropout_model, weight_dir1)
    model = features.unfreeze(model)
    patience_es = 10
    es = features.setES(monitor, patience_es, min_delta)
    cp = features.setCP(monitor, weight_dir2)
    features.compileModel(model, lr, momentum, nesterov)
    # NOTE(review): this passes dropout_model rather than the reloaded /
    # unfrozen `model`; correct only if features.load/unfreeze mutate the
    # model in place — confirm against utils.trainFeatures.
    model_history = features.generator(dropout_model, train_gen, val_gen, epoch, cp, rlr, es)
    img_proc.plot_save(model_history, img_dir2)
    print('Done')
| 42.216783
| 133
| 0.656286
|
794c9e9d37a0f20097f5fb43a6d0a61bd9b521cb
| 25,757
|
py
|
Python
|
selfdrive/car/volkswagen/values.py
|
kansakitw/dragonpilotamd
|
83295e6746e685b22e218bd0bd943df674e42a81
|
[
"MIT"
] | 46
|
2021-05-20T08:54:54.000Z
|
2022-03-30T20:37:42.000Z
|
selfdrive/car/volkswagen/values.py
|
kansakitw/dragonpilotamd
|
83295e6746e685b22e218bd0bd943df674e42a81
|
[
"MIT"
] | 10
|
2020-07-17T14:50:38.000Z
|
2022-03-08T15:40:52.000Z
|
selfdrive/car/volkswagen/values.py
|
kansakitw/dragonpilotamd
|
83295e6746e685b22e218bd0bd943df674e42a81
|
[
"MIT"
] | 130
|
2020-08-19T04:20:02.000Z
|
2022-03-24T23:05:22.000Z
|
# flake8: noqa
from collections import defaultdict
from typing import Dict
from cereal import car
from selfdrive.car import dbc_dict
Ecu = car.CarParams.Ecu
NetworkLocation = car.CarParams.NetworkLocation
TransmissionType = car.CarParams.TransmissionType
GearShifter = car.CarState.GearShifter
class CarControllerParams:
    """Message-rate and steering-torque limits used by the VW MQB controller."""
    HCA_STEP = 2                           # HCA_01 message frequency 50Hz
    LDW_STEP = 10                          # LDW_02 message frequency 10Hz
    GRA_ACC_STEP = 3                       # GRA_ACC_01 message frequency 33Hz

    GRA_VBP_STEP = 100                     # Send ACC virtual button presses once a second
    GRA_VBP_COUNT = 16                     # Send VBP messages for ~0.5s (GRA_ACC_STEP * 16)

    # Observed documented MQB limits: 3.00 Nm max, rate of change 5.00 Nm/sec.
    # Limiting rate-of-change based on real-world testing and Comma's safety
    # requirements for minimum time to lane departure.
    STEER_MAX = 300                        # Max heading control assist torque 3.00 Nm
    STEER_DELTA_UP = 4                     # Max HCA reached in 1.50s (STEER_MAX / (50Hz * 1.50))
    STEER_DELTA_DOWN = 10                  # Min HCA reached in 0.60s (STEER_MAX / (50Hz * 0.60))
    STEER_DRIVER_ALLOWANCE = 80
    STEER_DRIVER_MULTIPLIER = 3            # weight driver torque heavily
    STEER_DRIVER_FACTOR = 1                # from dbc
class CANBUS:
    """CAN bus indices: 0 is the powertrain bus, 2 is the camera bus."""
    pt = 0   # powertrain CAN
    cam = 2  # camera CAN
class DBC_FILES:
    """Names of the DBC definition files for supported platforms."""
    mqb = "vw_mqb_2010"  # Used for all cars with MQB-style CAN messaging
# Every supported platform uses the MQB DBC, so default all fingerprints to it.
DBC = defaultdict(lambda: dbc_dict(DBC_FILES.mqb, None))  # type: Dict[str, Dict[str, str]]

# Initial (all released) state of the cruise-control stalk buttons.
BUTTON_STATES = {
  "accelCruise": False,
  "decelCruise": False,
  "cancel": False,
  "setCruise": False,
  "resumeCruise": False,
  "gapAdjustCruise": False
}

# LDW_02 display/chime codes shown in the MQB instrument cluster.
MQB_LDW_MESSAGES = {
  "none": 0,                            # Nothing to display
  "laneAssistUnavailChime": 1,          # "Lane Assist currently not available." with chime
  "laneAssistUnavailNoSensorChime": 3,  # "Lane Assist not available. No sensor view." with chime
  "laneAssistTakeOverUrgent": 4,        # "Lane Assist: Please Take Over Steering" with urgent beep
  "emergencyAssistUrgent": 6,           # "Emergency Assist: Please Take Over Steering" with urgent beep
  "laneAssistTakeOverChime": 7,         # "Lane Assist: Please Take Over Steering" with chime
  "laneAssistTakeOverSilent": 8,        # "Lane Assist: Please Take Over Steering" silent
  "emergencyAssistChangingLanes": 9,    # "Emergency Assist: Changing lanes..." with urgent beep
  "laneAssistDeactivated": 10,          # "Lane Assist deactivated." silent with persistent icon afterward
}
# Check the 7th and 8th characters of the VIN before adding a new CAR. If the
# chassis code is already listed below, don't add a new CAR, just add to the
# FW_VERSIONS for that existing CAR.
# Exception: SEAT Leon and SEAT Ateca share a chassis code
class CAR:
    """Supported vehicle platform strings, one per chassis generation."""
    ATLAS_MK1 = "VOLKSWAGEN ATLAS 1ST GEN"        # Chassis CA, Mk1 VW Atlas and Atlas Cross Sport
    GOLF_MK7 = "VOLKSWAGEN GOLF 7TH GEN"          # Chassis 5G/AU/BA/BE, Mk7 VW Golf and variants
    JETTA_MK7 = "VOLKSWAGEN JETTA 7TH GEN"        # Chassis BU, Mk7 Jetta
    PASSAT_MK8 = "VOLKSWAGEN PASSAT 8TH GEN"      # Chassis 3G, Mk8 Passat and variants
    TCROSS_MK1 = "VOLKSWAGEN T-CROSS 1ST GEN"     # Chassis C1, Mk1 VW T-Cross SWB and LWB variants
    TIGUAN_MK2 = "VOLKSWAGEN TIGUAN 2ND GEN"      # Chassis AD/BW, Mk2 VW Tiguan and variants
    TOURAN_MK2 = "VOLKSWAGEN TOURAN 2ND GEN"      # Chassis 1T, Mk2 VW Touran and variants
    AUDI_A3_MK3 = "AUDI A3 3RD GEN"               # Chassis 8V/FF, Mk3 Audi A3 and variants
    AUDI_Q2_MK1 = "AUDI Q2 1ST GEN"               # Chassis GA, Mk1 Audi Q2 (RoW) and Q2L (China only)
    SEAT_ATECA_MK1 = "SEAT ATECA 1ST GEN"         # Chassis 5F, Mk1 SEAT Ateca and CUPRA Ateca
    SEAT_LEON_MK3 = "SEAT LEON 3RD GEN"           # Chassis 5F, Mk3 SEAT Leon and variants
    SKODA_KODIAQ_MK1 = "SKODA KODIAQ 1ST GEN"     # Chassis NS, Mk1 Skoda Kodiaq
    SKODA_SCALA_MK1 = "SKODA SCALA 1ST GEN"       # Chassis NW, Mk1 Skoda Scala and Skoda Kamiq
    SKODA_SUPERB_MK3 = "SKODA SUPERB 3RD GEN"     # Chassis 3V/NP, Mk3 Skoda Superb and variants
    SKODA_OCTAVIA_MK3 = "SKODA OCTAVIA 3RD GEN"   # Chassis NE, Mk3 Skoda Octavia and variants
# All supported cars should return FW from the engine, srs, eps, and fwdRadar. Cars
# with a manual trans won't return transmission firmware, but all other cars will.
#
# The 0xF187 SW part number query should return in the form of N[NX][NX] NNN NNN [X[X]],
# where N=number, X=letter, and the trailing two letters are optional. Performance
# tuners sometimes tamper with that field (e.g. 8V0 9C0 BB0 1 from COBB/EQT). Tampered
# ECU SW part numbers are invalid for vehicle ID and compatibility checks. Try to have
# them repaired by the tuner before including them in openpilot.
FW_VERSIONS = {
CAR.ATLAS_MK1: {
(Ecu.engine, 0x7e0, None): [
b'\xf1\x8703H906026AA\xf1\x899970',
b'\xf1\x8703H906026F \xf1\x896696',
b'\xf1\x8703H906026F \xf1\x899970',
b'\xf1\x8703H906026S \xf1\x896693',
b'\xf1\x8703H906026S \xf1\x899970',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x8709G927158A \xf1\x893387',
b'\xf1\x8709G927158DR\xf1\x893536',
],
(Ecu.srs, 0x715, None): [
b'\xf1\x873Q0959655BC\xf1\x890503\xf1\x82\0161914151912001103111122031200',
b'\xf1\x873Q0959655BN\xf1\x890713\xf1\x82\0162214152212001105141122052900',
b'\xf1\x873Q0959655DB\xf1\x890720\xf1\x82\0162214152212001105141122052900',
],
(Ecu.eps, 0x712, None): [
b'\xf1\x873QF909144B \xf1\x891582\xf1\x82\00571B60924A1',
b'\xf1\x875Q0909143P \xf1\x892051\xf1\x820528B6090105',
],
(Ecu.fwdRadar, 0x757, None): [
b'\xf1\x875Q0907572H \xf1\x890620',
b'\xf1\x875Q0907572J \xf1\x890654',
b'\xf1\x875Q0907572P \xf1\x890682',
],
},
CAR.GOLF_MK7: {
(Ecu.engine, 0x7e0, None): [
b'\xf1\x8704E906016A \xf1\x897697',
b'\xf1\x8704E906016AD\xf1\x895758',
b'\xf1\x8704E906023AG\xf1\x891726',
b'\xf1\x8704E906023BN\xf1\x894518',
b'\xf1\x8704E906024K \xf1\x896811',
b'\xf1\x8704E906027GR\xf1\x892394',
b'\xf1\x8704E906027HD\xf1\x893742',
b'\xf1\x8704E906027MA\xf1\x894958',
b'\xf1\x8704L906026BP\xf1\x897608',
b'\xf1\x8704L906026NF\xf1\x899528',
b'\xf1\x8704L906056CL\xf1\x893823',
b'\xf1\x8704L906056CR\xf1\x895813',
b'\xf1\x8704L906056HE\xf1\x893758',
b'\xf1\x870EA906016A \xf1\x898343',
b'\xf1\x870EA906016F \xf1\x895002',
b'\xf1\x870EA906016S \xf1\x897207',
b'\xf1\x875G0906259 \xf1\x890007',
b'\xf1\x875G0906259J \xf1\x890002',
b'\xf1\x875G0906259L \xf1\x890002',
b'\xf1\x875G0906259N \xf1\x890003',
b'\xf1\x875G0906259Q \xf1\x890002',
b'\xf1\x875G0906259Q \xf1\x892313',
b'\xf1\x878V0906259H \xf1\x890002',
b'\xf1\x878V0906259J \xf1\x890003',
b'\xf1\x878V0906259K \xf1\x890001',
b'\xf1\x878V0906259P \xf1\x890001',
b'\xf1\x878V0906259Q \xf1\x890002',
b'\xf1\x878V0906264F \xf1\x890003',
b'\xf1\x878V0906264L \xf1\x890002',
b'\xf1\x878V0906264M \xf1\x890001',
b'\xf1\x878V09C0BB01 \xf1\x890001',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x8709G927749AP\xf1\x892943',
b'\xf1\x8709S927158A \xf1\x893585',
b'\xf1\x870CW300041H \xf1\x891010',
b'\xf1\x870CW300042F \xf1\x891604',
b'\xf1\x870CW300043B \xf1\x891601',
b'\xf1\x870CW300045 \xf1\x894531',
b'\xf1\x870CW300047D \xf1\x895261',
b'\xf1\x870CW300048J \xf1\x890611',
b'\xf1\x870D9300012 \xf1\x894904',
b'\xf1\x870D9300012 \xf1\x894913',
b'\xf1\x870D9300012 \xf1\x894937',
b'\xf1\x870D9300012 \xf1\x895045',
b'\xf1\x870D9300014M \xf1\x895004',
b'\xf1\x870D9300020S \xf1\x895201',
b'\xf1\x870D9300040S \xf1\x894311',
b'\xf1\x870D9300041H \xf1\x895220',
b'\xf1\x870DD300045K \xf1\x891120',
b'\xf1\x870DD300046F \xf1\x891601',
b'\xf1\x870GC300012A \xf1\x891403',
b'\xf1\x870GC300014B \xf1\x892401',
b'\xf1\x870GC300014B \xf1\x892405',
b'\xf1\x870GC300020G \xf1\x892401',
b'\xf1\x870GC300020G \xf1\x892403',
b'\xf1\x870GC300020G \xf1\x892404',
b'\xf1\x870GC300043T \xf1\x899999',
],
(Ecu.srs, 0x715, None): [
b'\xf1\x875Q0959655AA\xf1\x890386\xf1\x82\0211413001113120043114317121C111C9113',
b'\xf1\x875Q0959655AA\xf1\x890386\xf1\x82\0211413001113120053114317121C111C9113',
b'\xf1\x875Q0959655AA\xf1\x890388\xf1\x82\0211413001113120043114317121C111C9113',
b'\xf1\x875Q0959655AA\xf1\x890388\xf1\x82\0211413001113120043114417121411149113',
b'\xf1\x875Q0959655AA\xf1\x890388\xf1\x82\0211413001113120053114317121C111C9113',
b'\xf1\x875Q0959655BH\xf1\x890336\xf1\x82\02314160011123300314211012230229333463100',
b'\xf1\x875Q0959655BT\xf1\x890403\xf1\x82\023141600111233003142404A2252229333463100',
b'\xf1\x875Q0959655BT\xf1\x890403\xf1\x82\023141600111233003142405A2252229333463100',
b'\xf1\x875Q0959655D \xf1\x890388\xf1\x82\0211413001113120006110417121A101A9113',
b'\xf1\x875Q0959655J \xf1\x890830\xf1\x82\023271112111312--071104171825102591131211',
b'\xf1\x875Q0959655J \xf1\x890830\xf1\x82\023271212111312--071104171838103891131211',
b'\xf1\x875Q0959655J \xf1\x890830\xf1\x82\023341512112212--071104172328102891131211',
b'\xf1\x875Q0959655J \xf1\x890830\xf1\x82\x13272512111312--07110417182C102C91131211',
b'\xf1\x875Q0959655M \xf1\x890361\xf1\x82\0211413001112120041114115121611169112',
b'\xf1\x875Q0959655S \xf1\x890870\xf1\x82\02315120011211200621143171717111791132111',
b'\xf1\x875Q0959655S \xf1\x890870\xf1\x82\02324230011211200061104171724102491132111',
b'\xf1\x875Q0959655S \xf1\x890870\xf1\x82\02324230011211200621143171724112491132111',
b'\xf1\x875Q0959655S \xf1\x890870\xf1\x82\x1315120011211200061104171717101791132111',
b'\xf1\x875Q0959655T \xf1\x890825\xf1\x82\023271200111312--071104171837103791132111',
b'\xf1\x875Q0959655T \xf1\x890830\xf1\x82\x13271100111312--071104171826102691131211',
b'\xf1\x875QD959655 \xf1\x890388\xf1\x82\x111413001113120006110417121D101D9112',
],
(Ecu.eps, 0x712, None): [
b'\xf1\x873Q0909144F \xf1\x895043\xf1\x82\00561A01612A0',
b'\xf1\x873Q0909144H \xf1\x895061\xf1\x82\00566A0J612A1',
b'\xf1\x873Q0909144J \xf1\x895063\xf1\x82\00566A00514A1',
b'\xf1\x873Q0909144J \xf1\x895063\xf1\x82\00566A0J712A1',
b'\xf1\x873Q0909144K \xf1\x895072\xf1\x82\00571A0J714A1',
b'\xf1\x873Q0909144L \xf1\x895081\xf1\x82\x0571A0JA15A1',
b'\xf1\x873Q0909144M \xf1\x895082\xf1\x82\00571A01A18A1',
b'\xf1\x873Q0909144M \xf1\x895082\xf1\x82\00571A0JA16A1',
b'\xf1\x875Q0909143K \xf1\x892033\xf1\x820519A9040203',
b'\xf1\x875Q0909144AA\xf1\x891081\xf1\x82\00521A00441A1',
b'\xf1\x875Q0909144AA\xf1\x891081\xf1\x82\x0521A00641A1',
b'\xf1\x875Q0909144AB\xf1\x891082\xf1\x82\00521A00442A1',
b'\xf1\x875Q0909144AB\xf1\x891082\xf1\x82\00521A00642A1',
b'\xf1\x875Q0909144AB\xf1\x891082\xf1\x82\00521A07B05A1',
b'\xf1\x875Q0909144L \xf1\x891021\xf1\x82\00522A00402A0',
b'\xf1\x875Q0909144P \xf1\x891043\xf1\x82\00511A00403A0',
b'\xf1\x875Q0909144R \xf1\x891061\xf1\x82\00516A00604A1',
b'\xf1\x875Q0909144S \xf1\x891063\xf1\x82\00516A00604A1',
b'\xf1\x875Q0909144S \xf1\x891063\xf1\x82\00516A07A02A1',
b'\xf1\x875Q0909144T \xf1\x891072\xf1\x82\00521A00507A1',
b'\xf1\x875Q0909144T \xf1\x891072\xf1\x82\00521A20B03A1',
b'\xf1\x875QD909144B \xf1\x891072\xf1\x82\x0521A00507A1',
b'\xf1\x875QM909144A \xf1\x891072\xf1\x82\x0521A20B03A1',
b'\xf1\x875QM909144B \xf1\x891081\xf1\x82\00521A00442A1',
b'\xf1\x875QN909144A \xf1\x895081\xf1\x82\00571A01A16A1',
b'\xf1\x875QN909144A \xf1\x895081\xf1\x82\00571A01A18A1',
b'\xf1\x875QN909144A \xf1\x895081\xf1\x82\x0571A01A17A1',
b'\xf1\x875QN909144B \xf1\x895082\xf1\x82\00571A01A18A1',
],
(Ecu.fwdRadar, 0x757, None): [
b'\xf1\x875Q0907572A \xf1\x890141\xf1\x82\00101',
b'\xf1\x875Q0907572B \xf1\x890200\xf1\x82\00101',
b'\xf1\x875Q0907572C \xf1\x890210\xf1\x82\00101',
b'\xf1\x875Q0907572D \xf1\x890304\xf1\x82\00101',
b'\xf1\x875Q0907572F \xf1\x890400\xf1\x82\00101',
b'\xf1\x875Q0907572G \xf1\x890571',
b'\xf1\x875Q0907572H \xf1\x890620',
b'\xf1\x875Q0907572J \xf1\x890654',
b'\xf1\x875Q0907572P \xf1\x890682',
b'\xf1\x875Q0907572R \xf1\x890771',
],
},
CAR.JETTA_MK7: {
(Ecu.engine, 0x7e0, None): [
b'\xf1\x8704E906024AK\xf1\x899937',
b'\xf1\x8704E906024AS\xf1\x899912',
b'\xf1\x8704E906024B \xf1\x895594',
b'\xf1\x8704E906024L \xf1\x895595',
b'\xf1\x8704E906027MS\xf1\x896223',
b'\xf1\x875G0906259T \xf1\x890003',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x8709G927158BQ\xf1\x893545',
b'\xf1\x8709S927158BS\xf1\x893642',
b'\xf1\x8709S927158R \xf1\x893552',
b'\xf1\x8709S927158R \xf1\x893587',
b'\xf1\x870GC300020N \xf1\x892803',
],
(Ecu.srs, 0x715, None): [
b'\xf1\x875Q0959655AG\xf1\x890336\xf1\x82\02314171231313500314611011630169333463100',
b'\xf1\x875Q0959655BM\xf1\x890403\xf1\x82\02314171231313500314642011650169333463100',
b'\xf1\x875Q0959655BM\xf1\x890403\xf1\x82\02314171231313500314643011650169333463100',
b'\xf1\x875Q0959655BR\xf1\x890403\xf1\x82\02311170031313300314240011150119333433100',
b'\xf1\x875Q0959655BR\xf1\x890403\xf1\x82\02319170031313300314240011550159333463100',
],
(Ecu.eps, 0x712, None): [
b'\xf1\x875QM909144B \xf1\x891081\xf1\x82\00521A10A01A1',
b'\xf1\x875QM909144B \xf1\x891081\xf1\x82\x0521B00404A1',
b'\xf1\x875QM909144C \xf1\x891082\xf1\x82\00521A00642A1',
b'\xf1\x875QM909144C \xf1\x891082\xf1\x82\00521A10A01A1',
b'\xf1\x875QN909144B \xf1\x895082\xf1\x82\00571A10A11A1',
],
(Ecu.fwdRadar, 0x757, None): [
b'\xf1\x875Q0907572N \xf1\x890681',
b'\xf1\x875Q0907572R \xf1\x890771',
],
},
CAR.PASSAT_MK8: {
(Ecu.engine, 0x7e0, None): [
b'\xf1\x8704E906023AH\xf1\x893379',
b'\xf1\x8704L906026GA\xf1\x892013',
b'\xf1\x873G0906264 \xf1\x890004',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x870CW300048R \xf1\x890610',
b'\xf1\x870D9300014L \xf1\x895002',
b'\xf1\x870DD300045T \xf1\x891601',
],
(Ecu.srs, 0x715, None): [
b'\xf1\x873Q0959655AN\xf1\x890306\xf1\x82\r58160058140013036914110311',
b'\xf1\x873Q0959655BB\xf1\x890195\xf1\x82\r56140056130012026612120211',
b'\xf1\x875Q0959655S \xf1\x890870\xf1\x82\02315120011111200631145171716121691132111',
],
(Ecu.eps, 0x712, None): [
b'\xf1\x875Q0909143M \xf1\x892041\xf1\x820522B0080803',
b'\xf1\x875Q0909144S \xf1\x891063\xf1\x82\00516B00501A1',
b'\xf1\x875Q0909144T \xf1\x891072\xf1\x82\00521B00703A1',
],
(Ecu.fwdRadar, 0x757, None): [
b'\xf1\x873Q0907572B \xf1\x890192',
b'\xf1\x873Q0907572C \xf1\x890195',
b'\xf1\x875Q0907572R \xf1\x890771',
],
},
CAR.TCROSS_MK1: {
(Ecu.engine, 0x7e0, None): [
b'\xf1\x8704C906025AK\xf1\x897053',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x870CW300050E \xf1\x891903',
],
(Ecu.srs, 0x715, None): [
b'\xf1\x872Q0959655AJ\xf1\x890250\xf1\x82\02212130411110411--04041104141311152H14',
],
(Ecu.eps, 0x712, None): [
b'\xf1\x872Q1909144M \xf1\x896041',
],
(Ecu.fwdRadar, 0x757, None): [
b'\xf1\x872Q0907572T \xf1\x890383',
],
},
CAR.TIGUAN_MK2: {
(Ecu.engine, 0x7e0, None): [
b'\xf1\x8704L906026EJ\xf1\x893661',
b'\xf1\x8704L906027G \xf1\x899893',
b'\xf1\x875N0906259 \xf1\x890002',
b'\xf1\x8783A907115B \xf1\x890005',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x8709G927158DT\xf1\x893698',
b'\xf1\x870DL300011N \xf1\x892001',
b'\xf1\x870DL300011N \xf1\x892012',
b'\xf1\x870DL300013A \xf1\x893005',
b'\xf1\x870DL300013G \xf1\x892120',
],
(Ecu.srs, 0x715, None): [
b'\xf1\x875Q0959655AR\xf1\x890317\xf1\x82\02331310031333334313132573732379333313100',
b'\xf1\x875Q0959655BM\xf1\x890403\xf1\x82\02316143231313500314641011750179333423100',
b'\xf1\x875Q0959655BT\xf1\x890403\xf1\x82\02312110031333300314240583752379333423100',
b'\xf1\x875Q0959655BT\xf1\x890403\xf1\x82\02331310031333336313140013950399333423100',
],
(Ecu.eps, 0x712, None): [
b'\xf1\x875Q0909143M \xf1\x892041\xf1\x820529A6060603',
b'\xf1\x875QF909144B \xf1\x895582\xf1\x82\00571A60634A1',
b'\xf1\x875QM909144B \xf1\x891081\xf1\x82\x0521A60604A1',
b'\xf1\x875QM909144C \xf1\x891082\xf1\x82\00521A60804A1',
],
(Ecu.fwdRadar, 0x757, None): [
b'\xf1\x872Q0907572J \xf1\x890156',
b'\xf1\x872Q0907572Q \xf1\x890342',
b'\xf1\x872Q0907572R \xf1\x890372',
],
},
CAR.TOURAN_MK2: {
(Ecu.engine, 0x7e0, None): [
b'\xf1\x8704L906026HM\xf1\x893017',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x870CW300041E \xf1\x891005',
],
(Ecu.srs, 0x715, None): [
b'\xf1\x875Q0959655AS\xf1\x890318\xf1\x82\023363500213533353141324C4732479333313100',
],
(Ecu.eps, 0x712, None): [
b'\xf1\x875Q0909143P \xf1\x892051\xf1\x820531B0062105',
],
(Ecu.fwdRadar, 0x757, None): [
b'\xf1\x873Q0907572C \xf1\x890195',
],
},
CAR.AUDI_A3_MK3: {
(Ecu.engine, 0x7e0, None): [
b'\xf1\x8704E906023AN\xf1\x893695',
b'\xf1\x8704E906023AR\xf1\x893440',
b'\xf1\x8704E906023BL\xf1\x895190',
b'\xf1\x8704E906027CJ\xf1\x897798',
b'\xf1\x8704L997022N \xf1\x899459',
b'\xf1\x875G0906259L \xf1\x890002',
b'\xf1\x875G0906259Q \xf1\x890002',
b'\xf1\x878V0906264B \xf1\x890003',
b'\xf1\x878V0907115B \xf1\x890007',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x870CW300044T \xf1\x895245',
b'\xf1\x870CW300048 \xf1\x895201',
b'\xf1\x870D9300013B \xf1\x894931',
b'\xf1\x870D9300041N \xf1\x894512',
b'\xf1\x870DD300046A \xf1\x891602',
b'\xf1\x870DD300046F \xf1\x891602',
b'\xf1\x870DD300046G \xf1\x891601',
b'\xf1\x870GC300013M \xf1\x892402',
b'\xf1\x870GC300042J \xf1\x891402',
],
(Ecu.srs, 0x715, None): [
b'\xf1\x875Q0959655AM\xf1\x890315\xf1\x82\x1311111111111111311411011231129321212100',
b'\xf1\x875Q0959655J \xf1\x890825\xf1\x82\023111112111111--171115141112221291163221',
b'\xf1\x875Q0959655J \xf1\x890830\xf1\x82\023121111111211--261117141112231291163221',
b'\xf1\x875Q0959655J \xf1\x890830\xf1\x82\x13121111111111--341117141212231291163221',
b'\xf1\x875Q0959655N \xf1\x890361\xf1\x82\0211212001112110004110411111421149114',
b'\xf1\x875Q0959655N \xf1\x890361\xf1\x82\0211212001112111104110411111521159114',
],
(Ecu.eps, 0x712, None): [
b'\xf1\x875Q0909144AB\xf1\x891082\xf1\x82\00521G0G809A1',
b'\xf1\x875Q0909144P \xf1\x891043\xf1\x82\00503G00303A0',
b'\xf1\x875Q0909144P \xf1\x891043\xf1\x82\00503G00803A0',
b'\xf1\x875Q0909144R \xf1\x891061\xf1\x82\00516G00804A1',
b'\xf1\x875Q0909144T \xf1\x891072\xf1\x82\00521G00807A1',
],
(Ecu.fwdRadar, 0x757, None): [
b'\xf1\x875Q0907572D \xf1\x890304\xf1\x82\00101',
b'\xf1\x875Q0907572G \xf1\x890571',
b'\xf1\x875Q0907572H \xf1\x890620',
b'\xf1\x875Q0907572P \xf1\x890682',
],
},
CAR.AUDI_Q2_MK1: {
(Ecu.engine, 0x7e0, None): [
b'\xf1\x8704E906027JT\xf1\x894145',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x870CW300041F \xf1\x891006',
],
(Ecu.srs, 0x715, None): [
b'\xf1\x875Q0959655BD\xf1\x890336\xf1\x82\x1311111111111100311211011231129321312111',
],
(Ecu.eps, 0x712, None): [
b'\xf1\x873Q0909144K \xf1\x895072\xf1\x82\x0571F60511A1',
],
(Ecu.fwdRadar, 0x757, None): [
b'\xf1\x872Q0907572M \xf1\x890233',
],
},
CAR.SEAT_ATECA_MK1: {
(Ecu.engine, 0x7e0, None): [
b'\xf1\x8704E906027KA\xf1\x893749',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x870D9300014S \xf1\x895202',
],
(Ecu.srs, 0x715, None): [
b'\xf1\x873Q0959655BH\xf1\x890703\xf1\x82\0161212001211001305121211052900',
],
(Ecu.eps, 0x712, None): [
b'\xf1\x873Q0909144L \xf1\x895081\xf1\x82\00571N60511A1',
],
(Ecu.fwdRadar, 0x757, None): [
b'\xf1\x872Q0907572M \xf1\x890233',
],
},
CAR.SEAT_LEON_MK3: {
(Ecu.engine, 0x7e0, None): [
b'\xf1\x8704L906021EL\xf1\x897542',
b'\xf1\x8704L906026BP\xf1\x891198',
b'\xf1\x8704L906026BP\xf1\x897608',
b'\xf1\x8705E906018AS\xf1\x899596',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x870CW300050J \xf1\x891908',
b'\xf1\x870D9300042M \xf1\x895016',
],
(Ecu.srs, 0x715, None): [
b'\xf1\x873Q0959655AC\xf1\x890189\xf1\x82\r11110011110011021511110200',
b'\xf1\x873Q0959655AS\xf1\x890200\xf1\x82\r12110012120012021612110200',
b'\xf1\x873Q0959655CM\xf1\x890720\xf1\x82\0161312001313001305171311052900',
],
(Ecu.eps, 0x712, None): [
b'\xf1\x875Q0909144AB\xf1\x891082\xf1\x82\00521N01342A1',
b'\xf1\x875Q0909144P \xf1\x891043\xf1\x82\00511N01805A0',
b'\xf1\x875Q0909144T \xf1\x891072\xf1\x82\00521N05808A1',
],
(Ecu.fwdRadar, 0x757, None): [
b'\xf1\x875Q0907572B \xf1\x890200\xf1\x82\00101',
b'\xf1\x875Q0907572H \xf1\x890620',
b'\xf1\x875Q0907572P \xf1\x890682',
],
},
CAR.SKODA_KODIAQ_MK1: {
(Ecu.engine, 0x7e0, None): [
b'\xf1\x8704E906027DD\xf1\x893123',
b'\xf1\x8704L906026DE\xf1\x895418',
b'\xf1\x875NA907115E \xf1\x890003',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x870D9300043 \xf1\x895202',
b'\xf1\x870DL300012M \xf1\x892107',
b'\xf1\x870DL300012N \xf1\x892110',
b'\xf1\x870DL300013G \xf1\x892119',
],
(Ecu.srs, 0x715, None): [
b'\xf1\x873Q0959655BJ\xf1\x890703\xf1\x82\0161213001211001205212111052100',
b'\xf1\x873Q0959655CN\xf1\x890720\xf1\x82\0161213001211001205212112052100',
b'\xf1\x873Q0959655CQ\xf1\x890720\xf1\x82\x0e1213111211001205212112052111',
],
(Ecu.eps, 0x712, None): [
b'\xf1\x875Q0909143P \xf1\x892051\xf1\x820527T6050405',
b'\xf1\x875Q0909143P \xf1\x892051\xf1\x820527T6060405',
b'\xf1\x875Q0910143C \xf1\x892211\xf1\x82\x0567T600G600',
],
(Ecu.fwdRadar, 0x757, None): [
b'\xf1\x872Q0907572Q \xf1\x890342',
b'\xf1\x872Q0907572R \xf1\x890372',
],
},
CAR.SKODA_OCTAVIA_MK3: {
(Ecu.engine, 0x7e0, None): [
b'\xf1\x8704E906016ER\xf1\x895823',
b'\xf1\x8704E906027HD\xf1\x893742',
b'\xf1\x8704L906021DT\xf1\x898127',
b'\xf1\x8704L906026BS\xf1\x891541',
b'\xf1\x875G0906259C \xf1\x890002',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x870CW300041N \xf1\x891605',
b'\xf1\x870CW300043B \xf1\x891601',
b'\xf1\x870D9300041C \xf1\x894936',
b'\xf1\x870D9300041J \xf1\x894902',
b'\xf1\x870D9300041P \xf1\x894507',
],
(Ecu.srs, 0x715, None): [
b'\xf1\x873Q0959655AC\xf1\x890200\xf1\x82\r11120011100010022212110200',
b'\xf1\x873Q0959655AQ\xf1\x890200\xf1\x82\r11120011100010312212113100',
b'\xf1\x873Q0959655AS\xf1\x890200\xf1\x82\r11120011100010022212110200',
b'\xf1\x873Q0959655BH\xf1\x890703\xf1\x82\0163221003221002105755331052100',
b'\xf1\x873Q0959655CN\xf1\x890720\xf1\x82\x0e3221003221002105755331052100',
],
(Ecu.eps, 0x712, None): [
b'\xf1\x873Q0909144J \xf1\x895063\xf1\x82\00566A01513A1',
b'\xf1\x875Q0909144AA\xf1\x891081\xf1\x82\00521T00403A1',
b'\xf1\x875Q0909144AB\xf1\x891082\xf1\x82\x0521T00403A1',
b'\xf1\x875Q0909144R \xf1\x891061\xf1\x82\x0516A00604A1',
],
(Ecu.fwdRadar, 0x757, None): [
b'\xf1\x875Q0907572D \xf1\x890304\xf1\x82\x0101',
b'\xf1\x875Q0907572F \xf1\x890400\xf1\x82\00101',
b'\xf1\x875Q0907572J \xf1\x890654',
b'\xf1\x875Q0907572P \xf1\x890682',
],
},
CAR.SKODA_SCALA_MK1: {
(Ecu.engine, 0x7e0, None): [
b'\xf1\x8704C906025AK\xf1\x897053',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x870CW300050 \xf1\x891709',
],
(Ecu.srs, 0x715, None): [
b'\xf1\x872Q0959655AM\xf1\x890351\xf1\x82\022111104111104112104040404111111112H14',
],
(Ecu.eps, 0x712, None): [
b'\xf1\x872Q1909144M \xf1\x896041',
],
(Ecu.fwdRadar, 0x757, None): [
b'\xf1\x872Q0907572R \xf1\x890372',
],
},
CAR.SKODA_SUPERB_MK3: {
(Ecu.engine, 0x7e0, None): [
b'\xf1\x8704L906026FP\xf1\x891196',
b'\xf1\x8704L906026KB\xf1\x894071',
b'\xf1\x873G0906259B \xf1\x890002',
b'\xf1\x873G0906264A \xf1\x890002',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x870CW300042H \xf1\x891601',
b'\xf1\x870D9300011T \xf1\x894801',
b'\xf1\x870D9300012 \xf1\x894940',
],
(Ecu.srs, 0x715, None): [
b'\xf1\x875Q0959655AE\xf1\x890130\xf1\x82\022111200111121001121118112231292221111',
b'\xf1\x875Q0959655AK\xf1\x890130\xf1\x82\022111200111121001121110012211292221111',
b'\xf1\x875Q0959655BH\xf1\x890336\xf1\x82\02331310031313100313131013141319331413100',
],
(Ecu.eps, 0x712, None): [
b'\xf1\x875Q0909143K \xf1\x892033\xf1\x820514UZ070203',
b'\xf1\x875Q0909143M \xf1\x892041\xf1\x820522UZ070303',
b'\xf1\x875Q0910143B \xf1\x892201\xf1\x82\00563UZ060700',
b'\xf1\x875Q0910143B \xf1\x892201\xf1\x82\x0563UZ060600',
],
(Ecu.fwdRadar, 0x757, None): [
b'\xf1\x873Q0907572B \xf1\x890192',
b'\xf1\x873Q0907572B \xf1\x890194',
b'\xf1\x873Q0907572C \xf1\x890195',
],
},
}
| 43.582064
| 106
| 0.677447
|
794c9ee367c1aac5064ded54db0e3ef136215b2a
| 4,480
|
py
|
Python
|
skl2onnx/operator_converters/label_binariser.py
|
xiaowuhu/sklearn-onnx
|
e85674a67a0a043e19c2ffe181e5d31eca8ce40b
|
[
"Apache-2.0"
] | 323
|
2018-12-18T20:23:19.000Z
|
2022-03-25T09:47:31.000Z
|
skl2onnx/operator_converters/label_binariser.py
|
xiaowuhu/sklearn-onnx
|
e85674a67a0a043e19c2ffe181e5d31eca8ce40b
|
[
"Apache-2.0"
] | 408
|
2019-01-02T12:16:10.000Z
|
2022-03-21T14:01:28.000Z
|
skl2onnx/operator_converters/label_binariser.py
|
xiaowuhu/sklearn-onnx
|
e85674a67a0a043e19c2ffe181e5d31eca8ce40b
|
[
"Apache-2.0"
] | 70
|
2018-12-20T19:36:07.000Z
|
2022-03-14T06:41:36.000Z
|
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from ..proto import onnx_proto
from ..common._apply_operation import apply_cast, apply_reshape
from ..common._registration import register_converter
from ..common._topology import Scope, Operator
from ..common._container import ModelComponentContainer
def convert_sklearn_label_binariser(scope: Scope, operator: Operator,
                                    container: ModelComponentContainer):
    """Convert a scikit-learn LabelBinarizer model to ONNX.

    For the multilabel-indicator case the input already is the desired
    binary matrix, so a single Identity node is emitted.  Otherwise an
    Equal + Where subgraph maps every input label to a row containing
    ``pos_label`` at its class column and ``neg_label`` elsewhere, and
    the result is cast to int64.
    """
    binariser_op = operator.raw_operator
    classes = binariser_op.classes_
    if (hasattr(binariser_op, 'sparse_input_') and
            binariser_op.sparse_input_):
        raise RuntimeError("sparse is not supported for LabelBinarizer.")
    if (hasattr(binariser_op, 'y_type_') and
            binariser_op.y_type_ == "multilabel-indicator"):
        # Only the identity configuration of the indicator case is
        # representable: pos_label == 1 and classes == [0..n-1].
        if binariser_op.pos_label != 1:
            raise RuntimeError("pos_label != 1 is not supported "
                               "for LabelBinarizer.")
        if list(classes) != list(range(len(classes))):
            raise RuntimeError("classes != [0, 1, ..., n_classes] is not "
                               "supported for LabelBinarizer.")
        container.add_node('Identity', operator.inputs[0].full_name,
                           operator.output_full_names,
                           name=scope.get_unique_operator_name('identity'))
    else:
        # FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24.
        # It was an alias for the builtin float (= float64), so np.float64
        # preserves the original behaviour.
        zeros_tensor = np.full((1, len(classes)),
                               binariser_op.neg_label, dtype=np.float64)
        unit_tensor = np.full((1, len(classes)),
                              binariser_op.pos_label, dtype=np.float64)
        classes_tensor_name = scope.get_unique_variable_name('classes_tensor')
        equal_condition_tensor_name = scope.get_unique_variable_name(
            'equal_condition_tensor')
        zeros_tensor_name = scope.get_unique_variable_name('zero_tensor')
        unit_tensor_name = scope.get_unique_variable_name('unit_tensor')
        where_result_name = scope.get_unique_variable_name('where_result')
        class_dtype = onnx_proto.TensorProto.STRING
        if np.issubdtype(binariser_op.classes_.dtype, np.signedinteger):
            class_dtype = onnx_proto.TensorProto.INT64
        else:
            # String labels must be utf-8 encoded for the initializer.
            classes = np.array([s.encode('utf-8') for s in classes])
        container.add_initializer(classes_tensor_name, class_dtype,
                                  [len(classes)], classes)
        container.add_initializer(
            zeros_tensor_name, onnx_proto.TensorProto.FLOAT,
            zeros_tensor.shape, zeros_tensor.ravel())
        container.add_initializer(
            unit_tensor_name, onnx_proto.TensorProto.FLOAT,
            unit_tensor.shape, unit_tensor.ravel())
        reshaped_input_name = scope.get_unique_variable_name('reshaped_input')
        apply_reshape(scope, operator.inputs[0].full_name, reshaped_input_name,
                      container, desired_shape=[-1, 1])
        # Models with classes_/inputs of string type would fail in the
        # following step as Equal op does not support string comparison.
        container.add_node('Equal', [classes_tensor_name, reshaped_input_name],
                           equal_condition_tensor_name,
                           name=scope.get_unique_operator_name('equal'))
        container.add_node(
            'Where',
            [equal_condition_tensor_name, unit_tensor_name, zeros_tensor_name],
            where_result_name,
            name=scope.get_unique_operator_name('where'))
        where_res = where_result_name
        if len(binariser_op.classes_) == 2:
            # Binary problems emit a single column: keep only the
            # positive-class column of the Where result.
            array_f_name = scope.get_unique_variable_name(
                'array_feature_extractor_result')
            pos_class_index_name = scope.get_unique_variable_name(
                'pos_class_index')
            container.add_initializer(
                pos_class_index_name, onnx_proto.TensorProto.INT64, [], [1])
            container.add_node(
                'ArrayFeatureExtractor',
                [where_result_name, pos_class_index_name],
                array_f_name, op_domain='ai.onnx.ml',
                name=scope.get_unique_operator_name('ArrayFeatureExtractor'))
            where_res = array_f_name
        apply_cast(scope, where_res, operator.output_full_names, container,
                   to=onnx_proto.TensorProto.INT64)


register_converter('SklearnLabelBinarizer', convert_sklearn_label_binariser)
| 47.157895
| 79
| 0.651116
|
794ca025542db6c454c7402393374fea92a1c558
| 5,997
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/datalakestore/firewall_rule.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 31
|
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_nextgen/datalakestore/firewall_rule.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 231
|
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_nextgen/datalakestore/firewall_rule.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 4
|
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = ['FirewallRule']
class FirewallRule(pulumi.CustomResource):
    """Data Lake Store firewall rule resource (API Version 2016-11-01).

    Auto-generated by the Pulumi SDK Generator -- do not edit by hand.
    """
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 account_name: Optional[pulumi.Input[str]] = None,
                 end_ip_address: Optional[pulumi.Input[str]] = None,
                 firewall_rule_name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 start_ip_address: Optional[pulumi.Input[str]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Data Lake Store firewall rule information.
        API Version: 2016-11-01.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] account_name: The name of the Data Lake Store account.
        :param pulumi.Input[str] end_ip_address: The end IP address for the firewall rule. This can be either ipv4 or ipv6. Start and End should be in the same protocol.
        :param pulumi.Input[str] firewall_rule_name: The name of the firewall rule to create or update.
        :param pulumi.Input[str] resource_group_name: The name of the Azure resource group.
        :param pulumi.Input[str] start_ip_address: The start IP address for the firewall rule. This can be either ipv4 or ipv6. Start and End should be in the same protocol.
        """
        # Legacy keyword arguments __name__/__opts__ are accepted but deprecated.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id unset means a new resource is being created: validate and
        # collect the input properties. (With opts.id set, an existing
        # resource is being looked up and no inputs may be supplied.)
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            if account_name is None and not opts.urn:
                raise TypeError("Missing required property 'account_name'")
            __props__['account_name'] = account_name
            if end_ip_address is None and not opts.urn:
                raise TypeError("Missing required property 'end_ip_address'")
            __props__['end_ip_address'] = end_ip_address
            __props__['firewall_rule_name'] = firewall_rule_name
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            if start_ip_address is None and not opts.urn:
                raise TypeError("Missing required property 'start_ip_address'")
            __props__['start_ip_address'] = start_ip_address
            # Output-only properties start as None and are filled by the engine.
            __props__['name'] = None
            __props__['type'] = None
            # NOTE(review): alias_opts is bound only on this creation branch,
            # yet the merge below also runs on the opts.id lookup path (see
            # get()), where it would raise NameError; upstream codegen places
            # this assignment one level out -- confirm the indentation.
            alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:datalakestore/latest:FirewallRule"), pulumi.Alias(type_="azure-nextgen:datalakestore/v20161101:FirewallRule")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(FirewallRule, __self__).__init__(
            'azure-nextgen:datalakestore:FirewallRule',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'FirewallRule':
        """
        Get an existing FirewallRule resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # No input properties are passed: state is recovered from the engine.
        __props__ = dict()
        return FirewallRule(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="endIpAddress")
    def end_ip_address(self) -> pulumi.Output[str]:
        """
        The end IP address for the firewall rule. This can be either ipv4 or ipv6. Start and End should be in the same protocol.
        """
        return pulumi.get(self, "end_ip_address")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The resource name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="startIpAddress")
    def start_ip_address(self) -> pulumi.Output[str]:
        """
        The start IP address for the firewall rule. This can be either ipv4 or ipv6. Start and End should be in the same protocol.
        """
        return pulumi.get(self, "start_ip_address")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        The resource type.
        """
        return pulumi.get(self, "type")
    # The two translators map between the provider's camelCase property names
    # and the snake_case attributes used on this Python class.
    def translate_output_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 44.753731
| 198
| 0.651159
|
794ca05fc2793f0e1169353db8864f5757ba73c6
| 991
|
py
|
Python
|
daily_problems/problem_0_to_100/problem_33.py
|
rrwt/daily-coding-challenge
|
b16fc365fd142ebab429e605cb146c8bb0bc97a2
|
[
"MIT"
] | 1
|
2019-04-18T03:29:02.000Z
|
2019-04-18T03:29:02.000Z
|
daily_problems/problem_0_to_100/problem_33.py
|
rrwt/daily-coding-challenge
|
b16fc365fd142ebab429e605cb146c8bb0bc97a2
|
[
"MIT"
] | null | null | null |
daily_problems/problem_0_to_100/problem_33.py
|
rrwt/daily-coding-challenge
|
b16fc365fd142ebab429e605cb146c8bb0bc97a2
|
[
"MIT"
] | null | null | null |
"""
Compute the running median of a sequence of numbers. That is,
given a stream of numbers, print out the median of the list so far on each new element.
Recall that the median of an even-numbered list is the average of the two middle numbers.
For example, given the sequence [2, 1, 5, 7, 2, 0, 5], your algorithm should print out:
2 1.5 2 3.5 2 2 2
"""
def running_median():
    """Generator coroutine yielding the running median of the values sent in.

    Prime it with ``send(None)`` (or ``next``), which yields ``None``; each
    subsequent ``send(x)`` yields the median of all values sent so far (the
    average of the two middle values when the count is even).  Sending
    ``None`` terminates the generator.

    Fixed: the original annotated the return type as ``float`` although the
    function is a generator, and re-sorted the whole list for every new
    element (O(n log n) per element); ``bisect.insort`` keeps the list
    sorted with a single O(n) insertion.
    """
    from bisect import insort  # local import: module had no import section

    count_num = 0
    index_med = -1
    median = None
    arr = []
    while (num := (yield median)) is not None:
        insort(arr, num)  # insert into already-sorted list
        count_num += 1
        if count_num & 1:
            # Odd count: the median is the single middle element.
            index_med += 1
            median = arr[index_med]
        else:
            # Even count: average of the two middle elements.
            median = (arr[index_med] + arr[index_med + 1]) / 2
if __name__ == "__main__":
    # Drive the coroutine over the sample stream, printing each running median.
    stream = [2, 1, 5, 7, 2, 0, 5]
    medians = running_median()
    next(medians)  # prime the generator so it is parked at its first yield
    for value in stream:
        try:
            print(medians.send(value))
        except StopIteration:
            break
| 27.527778
| 89
| 0.586276
|
794ca0c0fbab5907f14a81327e383e4e4ed4c5b6
| 938
|
py
|
Python
|
api/views.py
|
Public-Health-Bioinformatics/sequdas-web
|
7ad58510568844f8469c68832caaa94b48f75fb3
|
[
"MIT"
] | 1
|
2020-02-02T05:01:16.000Z
|
2020-02-02T05:01:16.000Z
|
api/views.py
|
Public-Health-Bioinformatics/sequdas-web
|
7ad58510568844f8469c68832caaa94b48f75fb3
|
[
"MIT"
] | 3
|
2018-12-06T01:49:54.000Z
|
2019-01-07T18:30:11.000Z
|
api/views.py
|
Public-Health-Bioinformatics/sequdas-web
|
7ad58510568844f8469c68832caaa94b48f75fb3
|
[
"MIT"
] | null | null | null |
from rest_framework import viewsets
from django.contrib.auth.models import User
from . import models
from . import serializers
# Read-only REST endpoints: each ViewSet exposes only list/retrieve actions
# (ReadOnlyModelViewSet), backed by an unfiltered queryset and a serializer
# from the sibling `serializers` module.
class UserViewSet(viewsets.ReadOnlyModelViewSet):
    queryset = User.objects.all()
    serializer_class = serializers.UserSerializer
class SequencerViewSet(viewsets.ReadOnlyModelViewSet):
    queryset = models.Sequencer.objects.all()
    serializer_class = serializers.SequencerSerializer
class MiseqSequenceRunViewSet(viewsets.ReadOnlyModelViewSet):
    queryset = models.MiseqSequenceRun.objects.all()
    serializer_class = serializers.MiseqSequenceRunSerializer
class MiseqSampleSheetViewSet(viewsets.ReadOnlyModelViewSet):
    queryset = models.MiseqSampleSheet.objects.all()
    serializer_class = serializers.MiseqSampleSheetSerializer
class MiseqSampleViewSet(viewsets.ReadOnlyModelViewSet):
    queryset = models.MiseqSample.objects.all()
    serializer_class = serializers.MiseqSampleSerializer
| 37.52
| 61
| 0.824094
|
794ca0eb40e2c2effc5ed19e4d2916ef2663d55b
| 2,474
|
py
|
Python
|
python/seldon_core/persistence.py
|
dtaniwaki/seldon-core
|
fa9b7451b866b355571a46df6f697e3577c8259a
|
[
"Apache-2.0"
] | null | null | null |
python/seldon_core/persistence.py
|
dtaniwaki/seldon-core
|
fa9b7451b866b355571a46df6f697e3577c8259a
|
[
"Apache-2.0"
] | null | null | null |
python/seldon_core/persistence.py
|
dtaniwaki/seldon-core
|
fa9b7451b866b355571a46df6f697e3577c8259a
|
[
"Apache-2.0"
] | null | null | null |
import threading
import os
import time
import logging
import pickle
import redis
from seldon_core.user_model import SeldonComponent
from typing import Dict, Type
logger = logging.getLogger(__name__)
# Identity of this predictive unit; the three IDs namespace the Redis key so
# several deployments/predictors can share one Redis instance.
PRED_UNIT_ID = os.environ.get("PREDICTIVE_UNIT_ID", "0")
PREDICTOR_ID = os.environ.get("PREDICTOR_ID", "0")
DEPLOYMENT_ID = os.environ.get("SELDON_DEPLOYMENT_ID", "0")
REDIS_KEY = f"persistence_{DEPLOYMENT_ID}_{PREDICTOR_ID}_{PRED_UNIT_ID}"
REDIS_HOST = os.environ.get('REDIS_SERVICE_HOST', 'localhost')
# FIX: os.environ values are strings; cast so the port is an int whether it
# comes from the environment or from the default.
REDIS_PORT = int(os.environ.get("REDIS_SERVICE_PORT", 6379))
DEFAULT_PUSH_FREQUENCY = 60  # seconds between persistence pushes
def restore(user_class: Type[SeldonComponent], parameters: Dict) -> SeldonComponent:
    """Restore a previously persisted component instance from Redis.

    Parameters
    ----------
    user_class
        User class
    parameters
        The parameters for the class

    Returns
    -------
    The unpickled saved instance, or a freshly constructed one when no
    state is stored under this unit's Redis key.
    """
    logger.info("Restoring saved model from redis")
    client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT)
    snapshot = client.get(REDIS_KEY)
    if snapshot is not None:
        return pickle.loads(snapshot)
    logger.info("Saved state is empty, restoration aborted")
    return user_class(**parameters)
def persist(user_object: SeldonComponent, push_frequency: int = None):
    """Start a background thread that periodically persists ``user_object``
    to Redis.

    Parameters
    ----------
    user_object
        A user class object
    push_frequency
        How often to save state (secs); defaults to DEFAULT_PUSH_FREQUENCY.
    """
    frequency = DEFAULT_PUSH_FREQUENCY if push_frequency is None else push_frequency
    logger.info("Creating persistence thread, with frequency %s", frequency)
    PersistenceThread(user_object, frequency).start()
class PersistenceThread(threading.Thread):
    """Thread that pickles a user object into Redis every
    ``push_frequency`` seconds until :meth:`stop` is called."""

    def __init__(self, user_object, push_frequency):
        self.user_object = user_object
        self.push_frequency = push_frequency
        self._stopped = False
        self.redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT)
        super().__init__()

    def stop(self):
        """Ask the loop in :meth:`run` to exit after its current sleep."""
        logger.info("Stopping Persistence Thread")
        self._stopped = True

    def run(self):
        # Sleep first, then push: freshly restored/created state does not
        # need an immediate write-back.
        while not self._stopped:
            time.sleep(self.push_frequency)
            self.redis_client.set(REDIS_KEY, pickle.dumps(self.user_object))
| 28.767442
| 84
| 0.70574
|
794ca1d66ad4a75b8ff7b87ca6c5032871862036
| 6,438
|
py
|
Python
|
web3/datastructures.py
|
kseikyo/web3.py
|
8bc987e8ec2089133fbaf870dd0daa71e7447584
|
[
"MIT"
] | 3
|
2019-11-12T07:55:51.000Z
|
2020-04-01T11:19:18.000Z
|
web3/datastructures.py
|
kseikyo/web3.py
|
8bc987e8ec2089133fbaf870dd0daa71e7447584
|
[
"MIT"
] | 5
|
2020-07-18T14:09:54.000Z
|
2022-02-18T13:07:42.000Z
|
web3/datastructures.py
|
kseikyo/web3.py
|
8bc987e8ec2089133fbaf870dd0daa71e7447584
|
[
"MIT"
] | 2
|
2019-10-20T14:54:47.000Z
|
2020-06-11T07:29:37.000Z
|
from collections import (
OrderedDict,
)
from collections.abc import (
Hashable,
Mapping,
MutableMapping,
Sequence,
)
from eth_utils import (
is_integer,
)
from web3._utils.formatters import (
recursive_map,
)
# Hashable must be immutable:
# "the implementation of hashable collections requires that a key's hash value is immutable"
# https://docs.python.org/3/reference/datamodel.html#object.__hash__
class ReadableAttributeDict(Mapping):
    """Read-only mapping that stores its entries directly in ``__dict__``,
    so every key is also readable as an attribute."""

    def __init__(self, dictionary, *args, **kwargs):
        combined = dict(dictionary)
        combined.update(dict(*args, **kwargs))
        self.__dict__ = combined

    def __getitem__(self, key):
        return self.__dict__[key]

    def __iter__(self):
        return iter(self.__dict__)

    def __len__(self):
        return len(self.__dict__)

    def __repr__(self):
        return "%s(%r)" % (type(self).__name__, self.__dict__)

    def _repr_pretty_(self, builder, cycle):
        """Custom pretty output for the IPython console"""
        builder.text(type(self).__name__ + "(")
        if cycle:
            builder.text("<cycle>")
        else:
            builder.pretty(self.__dict__)
        builder.text(")")

    @classmethod
    def _apply_if_mapping(cls, value):
        # Wrap only mappings; every other value passes through untouched.
        return cls(value) if isinstance(value, Mapping) else value

    @classmethod
    def recursive(cls, value):
        return recursive_map(cls._apply_if_mapping, value)
class MutableAttributeDict(MutableMapping, ReadableAttributeDict):
    """Mutable variant: item assignment and deletion write straight through
    to the instance ``__dict__``."""

    def __setitem__(self, key, value):
        self.__dict__[key] = value

    def __delitem__(self, key):
        del self.__dict__[key]
class AttributeDict(ReadableAttributeDict, Hashable):
    """Superficially immutable, hashable attribute dict.

    The immutability is advisory only -- it can be worked around.
    """

    def __setattr__(self, attr, val):
        # Only the wholesale __dict__ swap performed by __init__ is allowed.
        if attr != '__dict__':
            raise TypeError('This data is immutable -- create a copy instead of modifying')
        super().__setattr__(attr, val)

    def __delattr__(self, key):
        raise TypeError('This data is immutable -- create a copy instead of modifying')

    def __hash__(self):
        # Sort so the hash is independent of insertion order.
        return hash(tuple(sorted(self.items())))

    def __eq__(self, other):
        return self.__dict__ == dict(other) if isinstance(other, Mapping) else False
class NamedElementOnion(Mapping):
    """
    Add layers to an onion-shaped structure. Optionally, inject to a specific layer.
    This structure is iterable, where the outermost layer is first, and innermost is last.
    """
    def __init__(self, init_elements, valid_element=callable):
        # _queue maps name -> element, ordered innermost-first; iteration
        # (__iter__) reverses it so the outermost layer comes first.
        self._queue = OrderedDict()
        for element in reversed(init_elements):
            # Entries failing valid_element are unpacked as (element, name)
            # pairs for add().
            if valid_element(element):
                self.add(element)
            else:
                self.add(*element)
    def add(self, element, name=None):
        # Un-named elements are keyed by the element object itself.
        if name is None:
            name = element
        if name in self._queue:
            if name is element:
                raise ValueError("You can't add the same un-named instance twice")
            else:
                raise ValueError("You can't add the same name again, use replace instead")
        self._queue[name] = element
    def inject(self, element, name=None, layer=None):
        """
        Inject a named element to an arbitrary layer in the onion.
        The current implementation only supports insertion at the innermost layer,
        or at the outermost layer. Note that inserting to the outermost is equivalent
        to calling :meth:`add` .
        """
        if not is_integer(layer):
            raise TypeError("The layer for insertion must be an int.")
        elif layer != 0 and layer != len(self._queue):
            raise NotImplementedError(
                "You can only insert to the beginning or end of a %s, currently. "
                "You tried to insert to %d, but only 0 and %d are permitted. " % (
                    type(self),
                    layer,
                    len(self._queue),
                )
            )
        self.add(element, name=name)
        # layer 0 is the outermost layer: move the just-added entry to the
        # front of the (innermost-first) queue.
        if layer == 0:
            if name is None:
                name = element
            self._queue.move_to_end(name, last=False)
        elif layer == len(self._queue):
            return
        else:
            raise AssertionError("Impossible to reach: earlier validation raises an error")
    def clear(self):
        self._queue.clear()
    def replace(self, old, new):
        """Swap the element stored under ``old`` for ``new``; returns the
        element that was replaced."""
        if old not in self._queue:
            raise ValueError("You can't replace unless one already exists, use add instead")
        to_be_replaced = self._queue[old]
        if to_be_replaced is old:
            # re-insert with new name in old slot
            self._replace_with_new_name(old, new)
        else:
            self._queue[old] = new
        return to_be_replaced
    def remove(self, old):
        if old not in self._queue:
            raise ValueError("You can only remove something that has been added")
        del self._queue[old]
    def _replace_with_new_name(self, old, new):
        # Append `new` (self-named), then rotate every key that followed
        # `old` (except `new` itself) to the end so `new` lands in `old`'s
        # slot; finally drop `old`.
        self._queue[new] = new
        found_old = False
        for key in list(self._queue.keys()):
            if not found_old:
                if key == old:
                    found_old = True
                continue
            elif key != new:
                self._queue.move_to_end(key)
        del self._queue[old]
    def __iter__(self):
        # Copy the values view to a list when it is not a Sequence, so that
        # reversed() can be applied; yields outermost layer first.
        elements = self._queue.values()
        if not isinstance(elements, Sequence):
            elements = list(elements)
        return iter(reversed(elements))
    def __add__(self, other):
        # On name collisions the right-hand onion's entries win (dict.update).
        if not isinstance(other, NamedElementOnion):
            raise NotImplementedError("You can only combine with another NamedElementOnion")
        combined = self._queue.copy()
        combined.update(other._queue)
        return NamedElementOnion(combined.items())
    def __contains__(self, element):
        return element in self._queue
    def __getitem__(self, element):
        return self._queue[element]
    def __len__(self):
        return len(self._queue)
    def __reversed__(self):
        # Innermost layer first (the raw queue order).
        elements = self._queue.values()
        if not isinstance(elements, Sequence):
            elements = list(elements)
        return iter(elements)
| 29.805556
| 92
| 0.605623
|
794ca1ed5d2fd63d2b6a2543efdcbede2f3502f0
| 600
|
py
|
Python
|
utils/CompetitionLimited.py
|
slyslyme/CTF_AWD_Platform
|
6e9eec0a23a316aaf1927d4ec5be923ac26ff21e
|
[
"MIT"
] | null | null | null |
utils/CompetitionLimited.py
|
slyslyme/CTF_AWD_Platform
|
6e9eec0a23a316aaf1927d4ec5be923ac26ff21e
|
[
"MIT"
] | 1
|
2019-04-30T14:48:46.000Z
|
2019-04-30T14:48:46.000Z
|
utils/CompetitionLimited.py
|
slyslyme/CTF_AWD_Platform
|
6e9eec0a23a316aaf1927d4ec5be923ac26ff21e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 19-5-19 下午8:20
# @Author : Archerx
# @Site : https://blog.ixuchao.cn
# @File : CompetitionLimited.py
# @Software: PyCharm
from rest_framework import serializers
import datetime
def CompetitionIsStarted(competiton):
    """Validate that a competition is currently running.

    Raises a DRF ValidationError keyed "402" when the current time is before
    ``competition_start``, or keyed "403" when it is after
    ``competition_end``; otherwise returns None.  Uses a naive local
    ``datetime.now()``, matching the model's stored times.
    """
    now = datetime.datetime.now()
    if now < competiton.competition_start:
        raise serializers.ValidationError({"402": "比赛未开始"})
    if now > competiton.competition_end:
        raise serializers.ValidationError({"403": "比赛已经结束"})
| 25
| 61
| 0.67
|
794ca1f4ff3168cfffe3212fb71de83f898ff539
| 2,382
|
py
|
Python
|
tests/brightway_fixtures.py
|
pjamesjoyce/wurst
|
95b37e72eaa18b33bdd83cd4a51d37d9eb4ae7ba
|
[
"BSD-2-Clause"
] | 1
|
2022-03-29T14:59:13.000Z
|
2022-03-29T14:59:13.000Z
|
tests/brightway_fixtures.py
|
pjamesjoyce/wurst
|
95b37e72eaa18b33bdd83cd4a51d37d9eb4ae7ba
|
[
"BSD-2-Clause"
] | null | null | null |
tests/brightway_fixtures.py
|
pjamesjoyce/wurst
|
95b37e72eaa18b33bdd83cd4a51d37d9eb4ae7ba
|
[
"BSD-2-Clause"
] | null | null | null |
try:
    # Optional test fixtures: everything below needs Brightway2 (bw2data) and
    # pytest; when either is missing, ``test_bw2_database`` is exported as
    # None so importers can detect the unavailable fixture.
    from bw2data.tests import bw2test
    from bw2data import Database
    import pytest
    # Minimal biosphere database: two emission flows keyed by (db name, code).
    biosphere = {
        ("biosphere", '1'): {
            'categories': ['things'],
            'code': '1',
            'exchanges': [],
            'reference product': 'find me!',
            'name': 'an emission',
            'type': 'emission',
            'unit': 'kg'
        },
        ("biosphere", '2'): {
            'categories': ['things'],
            'code': '2',
            'exchanges': [],
            'type': 'emission',
            'name': 'another emission',
            'unit': 'kg'
        },
    }
    # Technosphere database with two mutually linked activities ("lunch" and
    # "dinner") that also exchange with the biosphere flows above.
    food = {
        ("food", '1'): {
            'categories': ['stuff', 'meals'],
            'code': '1',
            'classifications': [42],
            'comment': 'Yep',
            'reference product': 'stuff',
            'exchanges': [{
                'amount': 0.5,
                'input': ('food', '2'),
                'type': 'technosphere',
                'production volume': 13},
                {'amount': 0.05,
                 'input': ('biosphere', '1'),
                 'type': 'biosphere',
                 'uncertainty type': 4}],
            'location': 'CA',
            'name': 'lunch',
            'type': 'process',
            'unit': 'kg',
            'parameters': {
                'losses_gross_net': {'amount': 0.01}
            },
        },
        ("food", '2'): {
            'categories': ['stuff', 'meals'],
            'code': '2',
            'exchanges': [{
                'amount': 0.25,
                'input': ('food', '1'),
                'type': 'technosphere',
                'uncertainty type': 0},
                {'amount': 0.15,
                 'input': ('biosphere', '2'),
                 'type': 'biosphere',
                 'uncertainty type': 0}],
            'location': 'CH',
            'name': 'dinner',
            'type': 'process',
            'unit': 'kg',
            'parameters': [{
                'name': 'rara',
                'amount': 13,
                'something': 'else',
            }],
        },
    }
    @pytest.fixture(scope='function')
    @bw2test
    def test_bw2_database():
        # Write both fixture databases into the temporary bw2 project that
        # @bw2test provides for each test function.
        d = Database("biosphere")
        d.write(biosphere)
        d = Database("food")
        d.write(food)
except ImportError:
    test_bw2_database = None
| 28.023529
| 52
| 0.370277
|
794ca21ce5f979a1fe053fd83d529204d342ae6f
| 18,448
|
py
|
Python
|
wildcam/_version.py
|
wiederm/wildcam
|
74f4e9e1c96a160be1042a6d7086e1406dd4b058
|
[
"MIT"
] | null | null | null |
wildcam/_version.py
|
wiederm/wildcam
|
74f4e9e1c96a160be1042a6d7086e1406dd4b058
|
[
"MIT"
] | null | null | null |
wildcam/_version.py
|
wiederm/wildcam
|
74f4e9e1c96a160be1042a6d7086e1406dd4b058
|
[
"MIT"
] | null | null | null |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # These strings are substituted by git during `git archive`.  Each
    # variable must stay on its own line: setup.py/versioneer.py grep for
    # the variable names.
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    return dict(refnames=git_refnames, full=git_full, date=git_date)
class VersioneerConfig:
    """Plain attribute container for Versioneer configuration parameters."""
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # These settings were baked in when 'setup.py versioneer' generated
    # this _version.py.
    cfg = VersioneerConfig()
    settings = {
        "VCS": "git",
        "style": "pep440",
        "tag_prefix": "",
        "parentdir_prefix": "None",
        "versionfile_source": "wildcam/_version.py",
        "verbose": False,
    }
    for attr, value in settings.items():
        setattr(cfg, attr, value)
    return cfg
class NotThisMethod(Exception):
    """Raised when a version-discovery strategy does not apply to this tree."""
# NOTE(review): LONG_VERSION_PY appears unused in this generated module;
# retained as part of the versioneer template interface.
LONG_VERSION_PY = {}
# VCS name -> {method name -> handler}, populated by register_vcs_handler.
HANDLERS = {}
def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Call the given command(s).

    Tries each candidate executable in ``commands`` until one starts;
    returns ``(stdout, returncode)``, or ``(None, None)`` when no command
    could be launched.
    """
    assert isinstance(commands, list)
    process = None
    for command in commands:
        dispcmd = str([command] + args)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            stderr_target = subprocess.PIPE if hide_stderr else None
            process = subprocess.Popen([command] + args, cwd=cwd, env=env,
                                       stdout=subprocess.PIPE,
                                       stderr=stderr_target)
            break
        except EnvironmentError as exc:
            if exc.errno == errno.ENOENT:
                # Candidate executable not found: try the next one.
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(exc)
            return None, None
    if process is None:
        # Every candidate was missing.
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = process.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, process.returncode
    return stdout, process.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory
    """
    tried = []
    for _ in range(3):
        dirname = os.path.basename(root)
        if dirname.startswith(parentdir_prefix):
            # Everything after the prefix is taken to be the version string.
            return {"version": dirname[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        tried.append(root)
        root = os.path.dirname(root)  # up a level
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file.

    Scans *versionfile_abs* for the ``git_refnames``/``git_full``/``git_date``
    assignments and returns whichever of them it finds as a dict (possibly
    empty when the file is missing or contains none of them).
    """
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        # FIX: use a context manager so the handle is closed even when a
        # read fails partway through -- the original only closed it on the
        # success path.
        with open(versionfile_abs, "r") as f:
            for line in f.readlines():
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
                if line.strip().startswith("git_date ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["date"] = mo.group(1)
    except EnvironmentError:
        # Missing/unreadable file: fall through and return what was found.
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords.

    *keywords* is the dict produced by git_get_keywords(): the $Format$
    substitutions expanded by git-archive.  Raises NotThisMethod when the
    keywords are absent or unexpanded (i.e. this is not an archive tarball).
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        # Substitution never ran: we are looking at a raw checkout, not an
        # archive, so this method does not apply.
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set([r for r in refs if re.search(r'\d', r)])
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None,
                    "date": date}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys: long, short, closest-tag, distance,
    dirty, date, error.  Raises NotThisMethod if *root* is not under git
    control or the git invocations fail outright.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        # Popen runs with shell=False, so the wrapper executables must be
        # named explicitly on Windows.
        GITS = ["git.cmd", "git.exe"]
    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
                          hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
                                          "--always", "--long",
                                          "--match", "%s*" % tag_prefix],
                                   cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                    cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    # NOTE(review): if this git call fails, run_command returns (None, None)
    # and the [0].strip() below raises AttributeError -- confirm whether that
    # can happen once the rev-parse checks above have succeeded.
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
                       cwd=root)[0].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def plus_or_dot(pieces):
    """Return "+" unless the closest tag already has one, else ".".

    PEP 440 local version identifiers are introduced with a single "+";
    any further separator inside the local segment must be ".".

    Fix: the original tested ``"+" in pieces.get("closest-tag", "")``,
    which raises TypeError when the key is *present* but None -- exactly
    what git_pieces_from_vcs produces in the no-tags case.  Guard with
    ``or ""`` so a None tag falls back to the empty string.
    """
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] .  Note that if you get a tagged
    build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: nothing tagged anywhere in history
        version = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
        return version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        # "." if the tag already carries a "+" local segment, "+" otherwise
        # (plus_or_dot inlined: tag is known truthy here)
        version += "." if "+" in tag else "+"
        version += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
    return version
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    distance = pieces["distance"]
    if not tag:
        # exception #1: no tag reachable from this commit
        return "0.post.dev%d" % distance
    if not distance:
        # sitting exactly on the tag
        return tag
    return "%s.post.dev%d" % (tag, distance)
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        version = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        return version + "+g%s" % pieces["short"]
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        version += plus_or_dot(pieces)
        version += "g%s" % pieces["short"]
    return version
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = tag
        needs_post = bool(pieces["distance"] or pieces["dirty"])
    else:
        # exception #1: synthesize a "0" base version
        rendered = "0"
        needs_post = True
    if needs_post:
        rendered += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
    return rendered
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: fall back to the bare short hash
        rendered = pieces["short"]
    elif pieces["distance"]:
        rendered = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        rendered = tag
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1: fall back to the bare short hash
        rendered = pieces["short"]
    return rendered + ("-dirty" if pieces["dirty"] else "")
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # Extraction already failed; report the error, no version computable.
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}
    if not style or style == "default":
        style = "pep440"  # the default
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderers[style](pieces)
    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}
def get_versions():
    """Get version information or return default if unable to do so.

    Strategy, in order: expanded git-archive keywords, 'git describe' on a
    checked-out tree, then the parent-directory name.  Each helper signals
    "not applicable" by raising NotThisMethod; a final fallback dict with
    version "0+unknown" is returned when everything fails.
    """
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        # __file__ is undefined (frozen interpreter): cannot locate the tree.
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}
| 35.408829
| 79
| 0.584508
|
794ca29848e2f978e847d6431d4824b64e5ed854
| 674
|
py
|
Python
|
models/migrations/0006_auto_20200426_2222.py
|
liembudzien/Personal-Dashboard
|
43d6501258edb667a629660565e5b7d60ff2ff5f
|
[
"Unlicense"
] | null | null | null |
models/migrations/0006_auto_20200426_2222.py
|
liembudzien/Personal-Dashboard
|
43d6501258edb667a629660565e5b7d60ff2ff5f
|
[
"Unlicense"
] | null | null | null |
models/migrations/0006_auto_20200426_2222.py
|
liembudzien/Personal-Dashboard
|
43d6501258edb667a629660565e5b7d60ff2ff5f
|
[
"Unlicense"
] | null | null | null |
# Generated by Django 3.0.5 on 2020-04-27 03:22
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 3.0.5, 2020-04-27).

    NOTE(review): makemigrations baked the defaults in as *fixed* datetime
    literals (2020-04-26 22:22) rather than a callable such as
    django.utils.timezone.now -- presumably the model declared
    ``default=datetime.now()`` instead of ``default=datetime.now``; every
    new row gets this constant timestamp.  Left untouched here because
    hand-editing generated migrations desynchronizes them from the recorded
    model state; fix the model and generate a fresh migration instead.
    """

    dependencies = [
        ('models', '0005_auto_20200422_1455'),
    ]

    operations = [
        migrations.AlterField(
            model_name='taskitem',
            name='task_created_date',
            field=models.DateTimeField(default=datetime.datetime(2020, 4, 26, 22, 22, 11, 479100)),
        ),
        migrations.AlterField(
            model_name='taskitem',
            name='task_due_date',
            field=models.DateTimeField(default=datetime.datetime(2020, 4, 26, 22, 22, 11, 479116)),
        ),
    ]
| 26.96
| 99
| 0.611276
|
794ca61bcba04f09d71b321f84c7be4612cde633
| 13,054
|
py
|
Python
|
data/suncg_dataset.py
|
aluo-x/3D_SLN
|
0a29dbf17e3ca58064e76f9227f536a127c4863b
|
[
"Apache-2.0"
] | 39
|
2020-07-25T18:03:18.000Z
|
2022-03-30T04:27:47.000Z
|
data/suncg_dataset.py
|
aluo-x/3D_SLN
|
0a29dbf17e3ca58064e76f9227f536a127c4863b
|
[
"Apache-2.0"
] | 1
|
2020-10-19T03:12:48.000Z
|
2020-10-19T03:49:04.000Z
|
data/suncg_dataset.py
|
aluo-x/3D_SLN
|
0a29dbf17e3ca58064e76f9227f536a127c4863b
|
[
"Apache-2.0"
] | 6
|
2020-08-02T07:44:42.000Z
|
2022-01-06T03:13:15.000Z
|
import random
from collections import defaultdict
import torch
from torch.utils.data import Dataset
from data.base_dataset import BaseDataset
from utils import load_json, compute_rel
class SuncgDataset(BaseDataset):
    """SUNCG rooms as (objects, boxes, scene-graph triples, angles, attrs).

    Each item is one room: object category indices, per-object bounding
    boxes normalized to the room extent, randomly sampled spatial-relation
    triples, discrete rotation angles, and randomly sampled size attributes.
    Relations and attributes are re-sampled on every __getitem__ call.
    """
    def __init__(self, data_dir, train_3d, touching_relations=True, use_attr_30=False):
        """Load the cleaned SUNCG json and build vocab/lookup tables.

        data_dir: path to the cleaned & normalized rooms json.
        train_3d: must be True (asserted); 6-value 3D boxes are used.
        touching_relations: whether "touching" relations are allowed.
        use_attr_30: use the 30/70-percentile size tables for attributes.
        """
        # NOTE(review): super(Dataset, self) skips BaseDataset in the MRO --
        # presumably intentional, but confirm BaseDataset.__init__ needs no
        # setup here.
        super(Dataset, self).__init__()
        self.train_3d = train_3d
        assert self.train_3d
        # Do we train using 3D coors? You want True.
        self.use_attr_30 = use_attr_30
        # Do we want to train on object attributes? Split by 70:30? Tall/Short & Large/Small & None?
        print("Starting to read the json file for SUNCG")
        self.data = load_json(data_dir)
        # Json file for cleaned & normalized data
        self.room_ids = [int(i) for i in list(self.data)]
        self.touching_relations = touching_relations
        # Do objects touch? Works either way
        # Construction dict
        # obj_name is object type (chair/table/sofa etc. etc.)
        # pred_name is relation type (left/right etc.)
        # idx_to_name maps respective index back to object type or relation name
        valid_types = load_json("metadata/valid_types.json")
        self.vocab = {'object_idx_to_name': ['__room__'] + valid_types}
        # map obj type to idx
        self.vocab['object_name_to_idx'] = {}
        for i, name in enumerate(self.vocab['object_idx_to_name']):
            self.vocab['object_name_to_idx'][name] = i
        # map idx to relation type
        self.vocab['pred_idx_to_name'] = [
            '__in_room__',
            'left of',
            'right of',
            'behind',
            'in front of',
            'inside',
            'surrounding',
            'left touching',
            'right touching',
            'front touching',
            'behind touching',
            'front left',
            'front right',
            'back left',
            'back right',
            'on',
        ]
        # We don't actually use the front left, front right, back left, back right
        # map relation type to idx
        self.vocab['pred_name_to_idx'] = {}
        for idx, name in enumerate(self.vocab['pred_idx_to_name']):
            self.vocab['pred_name_to_idx'][name] = idx
        self.vocab['attrib_idx_to_name'] = [
            'none',
            'tall',
            'short',
            'large',
            'small',
        ]
        self.vocab['attrib_name_to_idx'] = {}
        for idx, name in enumerate(self.vocab['attrib_idx_to_name']):
            self.vocab['attrib_name_to_idx'][name] = idx
        # room_id -> list of object dicts; room_id -> [w, h, d] room extent
        self.image_id_to_objects = defaultdict(list)
        self.room_bboxes = {}
        for room_id in self.data:
            room = self.data[room_id]
            room_id = int(room_id)
            self.image_id_to_objects[room_id] = room["valid_objects"]
            self.room_bboxes[room_id] = room["bbox"]
        # Per-type size statistics used for the tall/short & large/small
        # attribute sampling below.
        self.size_data = load_json(
            "metadata/size_info_many.json")
        self.size_data_30 = load_json(
            "metadata/30_size_info_many.json")

    def total_objects(self):
        """Return the total number of objects across all rooms."""
        total_objs = 0
        for i, room_id in enumerate(self.room_ids):
            num_objs = len(self.image_id_to_objects[room_id])
            total_objs += num_objs
        return total_objs

    def __len__(self):
        """Number of rooms in the dataset."""
        return len(self.room_ids)

    def return_room_ids(self):
        """Return the list of integer room ids, in dataset order."""
        return self.room_ids

    def get_by_room_id(self, room_id):
        """Fetch the item for a specific room id (index 0 on failure)."""
        try:
            idx = self.room_ids.index(int(room_id))
        # NOTE(review): bare except also swallows KeyboardInterrupt etc.
        except:
            print("Get by room id failed! Defaulting to 0.")
            idx = 0
        return self.__getitem__(idx)

    def __getitem__(self, index):
        """Build one room sample.

        Returns (room_id, objs, boxes, triples, angles, attributes); the
        room itself is appended as a final '__room__' object, and relations
        / size attributes are randomly re-sampled per call.
        """
        room_id = self.room_ids[index]
        objs, boxes, angles = [], [], []
        for object_data in self.image_id_to_objects[room_id]:
            obj_type = object_data["type"]
            objs.append(self.vocab['object_name_to_idx'][obj_type])
            bbox = object_data['new_bbox']
            # Get min/max of the bbox
            x0 = bbox[0][0]
            y0 = bbox[0][1]
            z0 = bbox[0][2]
            x1 = bbox[1][0]
            y1 = bbox[1][1]
            z1 = bbox[1][2]
            if self.train_3d:
                boxes.append(torch.FloatTensor([x0, y0, z0, x1, y1, z1]))
            else:
                boxes.append(torch.FloatTensor([x0, z0, x1, z1]))
            theta = object_data['rotation']
            angles.append(theta)
        # The room is represented as a final pseudo-object spanning [0, extent].
        objs.append(self.vocab['object_name_to_idx']['__room__'])
        room_bbox = self.room_bboxes[room_id]
        x0 = 0.0
        y0 = 0.0
        z0 = 0.0
        x1 = room_bbox[0]
        y1 = room_bbox[1]
        z1 = room_bbox[2]
        if self.train_3d:
            boxes.append(torch.FloatTensor([x0, y0, z0, x1, y1, z1]))
        else:
            boxes.append(torch.FloatTensor([x0, z0, x1, z1]))
        angles.append(0)
        objs = torch.LongTensor(objs)
        boxes = torch.stack(boxes, dim=0)
        # Angles are discrete, so make it a long tensor
        angles = torch.LongTensor(angles)
        # Compute centers of all objects
        obj_centers = []
        if self.train_3d:
            for i, obj_idx in enumerate(objs):
                x0, y0, z0, x1, y1, z1 = boxes[i]
                mean_x = 0.5 * (x0 + x1)
                mean_y = 0.5 * (y0 + y1)
                mean_z = 0.5 * (z0 + z1)
                obj_centers.append([mean_x, mean_y, mean_z])
        else:
            for i, obj_idx in enumerate(objs):
                x0, z0, x1, z1 = boxes[i]
                mean_x = 0.5 * (x0 + x1)
                mean_z = 0.5 * (z0 + z1)
                obj_centers.append([mean_x, mean_z])
        obj_centers = torch.FloatTensor(obj_centers)
        # Compute scene graphs
        triples = []
        num_objs = objs.size(0)
        __room__ = self.vocab['object_name_to_idx']['__room__']
        real_objs = []
        if num_objs > 1:
            # get non-room object indices
            real_objs = (objs != __room__).nonzero().squeeze(1)
        if self.train_3d:
            # special: "on" relationships
            on_rels = defaultdict(list)
            for cur in real_objs:
                choices = [obj for obj in real_objs if obj != cur]
                for other in choices:
                    cur_box = boxes[cur]
                    other_box = boxes[other]
                    p = compute_rel(cur_box, other_box, None, None)
                    if p == "on":
                        p = self.vocab['pred_name_to_idx']['on']
                        triples.append([cur, p, other])
                        on_rels[cur].append(other)
            # add random relationships
            for cur in real_objs:
                choices = [obj for obj in real_objs if obj != cur]
                other = random.choice(choices)
                if random.random() > 0.5:
                    s, o = cur, other
                else:
                    s, o = other, cur
                # skip pairs already related by "on" (either direction)
                if s in on_rels[o] or o in on_rels[s]:
                    continue
                p = compute_rel(boxes[s], boxes[o], None, None)
                p = self.vocab['pred_name_to_idx'][p]
                triples.append([s, p, o])
        # Add __in_room__ triples
        O = objs.size(0)
        for i in range(O - 1):
            p = compute_rel(boxes[i], boxes[-1], None, "__room__")
            p = self.vocab['pred_name_to_idx'][p]
            triples.append([i, p, O - 1])
        triples = torch.LongTensor(triples)
        # normalize boxes, all in [0,1] relative to room
        b = boxes.size(0)
        if self.train_3d:
            for i in range(b - 1):
                boxes[i][0] /= boxes[-1][3]
                boxes[i][3] /= boxes[-1][3]
                boxes[i][1] /= boxes[-1][4]
                boxes[i][4] /= boxes[-1][4]
                boxes[i][2] /= boxes[-1][5]
                boxes[i][5] /= boxes[-1][5]
        else:
            for i in range(b - 1):
                boxes[i][0] /= boxes[-1][2]
                boxes[i][2] /= boxes[-1][2]
                boxes[i][1] /= boxes[-1][3]
                boxes[i][3] /= boxes[-1][3]
        if not self.use_attr_30:
            # compute size attributes using normalized bboxes
            attributes = []
            for i in range(b - 1):
                obj_type = self.vocab['object_idx_to_name'][objs[i]]
                # 50% of the time the attribute is dropped to "none".
                if random.random() > 0.5 or (obj_type not in self.size_data):
                    attributes.append("none")
                else:
                    obj_type = self.vocab['object_idx_to_name'][objs[i]]
                    if random.random() > 0.5:
                        # tall/short
                        obj_height = boxes[i][4] - boxes[i][1]
                        if obj_height > self.size_data[obj_type][0][1]:
                            attributes.append("tall")
                        else:
                            attributes.append("short")
                    else:
                        # large/small
                        obj_volume = (boxes[i][3] - boxes[i][0]) * (boxes[i][4] - boxes[i][1]) * (
                                boxes[i][5] - boxes[i][2])
                        if obj_volume > self.size_data[obj_type][1]:
                            attributes.append("large")
                        else:
                            attributes.append("small")
        else:
            # compute size attributes using normalized bboxes, use 30/70 size
            attributes = []
            for i in range(b - 1):
                obj_type = self.vocab['object_idx_to_name'][objs[i]]
                if random.random() > 0.5 or (obj_type not in self.size_data_30):
                    # if random.random() > 0.7:
                    attributes.append("none")
                else:
                    obj_type = self.vocab['object_idx_to_name'][objs[i]]
                    if random.random() > 0.5:
                        # tall/short
                        obj_height = boxes[i][4] - boxes[i][1]
                        if obj_height > self.size_data_30[obj_type]["height_7"]:
                            attributes.append("tall")
                        elif obj_height < self.size_data_30[obj_type]["height_3"]:
                            attributes.append("short")
                        else:
                            attributes.append("none")
                    else:
                        # large/small
                        obj_volume = (boxes[i][3] - boxes[i][0]) * (boxes[i][4] - boxes[i][1]) * (
                                boxes[i][5] - boxes[i][2])
                        if obj_volume > self.size_data_30[obj_type]["volume_7"]:
                            attributes.append("large")
                        elif obj_volume < self.size_data_30[obj_type]["volume_3"]:
                            attributes.append("small")
                        else:
                            attributes.append("none")
        # The trailing '__room__' pseudo-object always gets "none".
        attributes.append("none")
        attributes = [self.vocab["attrib_name_to_idx"][name] for name in attributes]
        attributes = torch.LongTensor(attributes)
        assert attributes.size(0) == objs.size(0)
        return room_id, objs, boxes, triples, angles, attributes
def suncg_collate_fn(batch):
    """
    Collate function to be used when wrapping SuncgDataset in a
    DataLoader. Returns a tuple of the following:

    - objs: LongTensor of shape (O,) giving object categories
    - boxes: FloatTensor of shape (O, 4)
    - triples: LongTensor of shape (T, 3) giving triples
    - obj_to_img: LongTensor of shape (O,) mapping objects to room
    - triple_to_img: LongTensor of shape (T,) mapping triples to room
    """
    ids_acc = []
    objs_acc, boxes_acc, angles_acc, attrs_acc = [], [], [], []
    triples_acc = []
    obj_room_acc, triple_room_acc = [], []
    offset = 0
    for room_index, sample in enumerate(batch):
        room_id, objs, boxes, triples, angles, attributes = sample
        # Skip degenerate samples that collapsed to scalar tensors.
        if objs.dim() == 0 or triples.dim() == 0:
            continue
        num_objs = objs.size(0)
        num_triples = triples.size(0)
        ids_acc.append(room_id)
        objs_acc.append(objs)
        boxes_acc.append(boxes)
        angles_acc.append(angles)
        attrs_acc.append(attributes)
        # Shift subject/object indices so they address the batched object list.
        shifted = triples.clone()
        shifted[:, 0] += offset
        shifted[:, 2] += offset
        triples_acc.append(shifted)
        obj_room_acc.append(torch.LongTensor(num_objs).fill_(room_index))
        triple_room_acc.append(torch.LongTensor(num_triples).fill_(room_index))
        offset += num_objs
    return (torch.LongTensor(ids_acc),
            torch.cat(objs_acc),
            torch.cat(boxes_acc),
            torch.cat(triples_acc),
            torch.cat(angles_acc),
            torch.cat(attrs_acc),
            torch.cat(obj_room_acc),
            torch.cat(triple_room_acc))
| 38.621302
| 118
| 0.522369
|
794ca63e6c15a75697d8d29d11c6f7d95d648a7d
| 5,456
|
py
|
Python
|
python/tvm/topi/cuda/softmax.py
|
aurel333/incubator-tvm
|
df4b8076ba729d94de58a9e99306e37a1c747101
|
[
"Apache-2.0"
] | null | null | null |
python/tvm/topi/cuda/softmax.py
|
aurel333/incubator-tvm
|
df4b8076ba729d94de58a9e99306e37a1c747101
|
[
"Apache-2.0"
] | null | null | null |
python/tvm/topi/cuda/softmax.py
|
aurel333/incubator-tvm
|
df4b8076ba729d94de58a9e99306e37a1c747101
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, trailing-whitespace
"""Schedule for softmax operator"""
from tvm import target as target_
from tvm import te
from tvm.contrib import cudnn
from .. import generic
from .injective import schedule_injective_from_existing
def schedule_softmax(outs):
    """Schedule for softmax op.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of softmax in the format
        of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    s = te.create_schedule([x.op for x in outs])
    softmax = outs[0]
    tgt = target_.Target.current(allow_none=False)

    # Recover the intermediate stages (max, exp, expsum) from the op graph;
    # their positions differ between softmax and log-softmax.
    op_tag = softmax.op.tag
    if op_tag == 'softmax_output':
        expsum = softmax.op.input_tensors[1]
        exp = softmax.op.input_tensors[0]
        max_elem = s[exp].op.input_tensors[1]
    elif op_tag == 'log_softmax_output':
        exp = None
        max_elem = softmax.op.input_tensors[1]
        expsum = softmax.op.input_tensors[2]
    else:
        raise ValueError('Tag is expected to be softmax_output or log_softmax_output. \
Got {0}'.format(op_tag))

    # The nvptx and rocm backends only supports 32-bits warp shuffle
    # instructions.
    #
    # TODO(tvm-team) Fix nvptx codegen or deprecate nvptx backend.
    def sched_warp_softmax():
        # Decide whether the warp-reduction schedule below may be used.
        if tgt.kind.name == "nvptx" or tgt.kind.name == "rocm":
            return softmax.dtype == "float32" or softmax.dtype == "int32"
        if tgt.kind.name != "cuda":
            # this is used as the gpu schedule for other arches which may not have warp reductions
            return False
        return True

    if len(softmax.shape) > 2:
        # Higher-rank inputs: fall back to a plain injective schedule per stage.
        ops = [max_elem.op, expsum.op, softmax.op]
        if exp is not None:
            ops.append(exp.op)
        for op in ops:
            s = schedule_injective_from_existing(s, op.output(0))
    elif sched_warp_softmax():
        # A warp of 32 threads performs a row reduction.
        num_thread = tgt.thread_warp_size
        block_x = te.thread_axis("blockIdx.x")
        thread_x = te.thread_axis((0, num_thread), "threadIdx.x")
        # (4) softmax
        xo, xi = s[softmax].split(softmax.op.axis[1], nparts=num_thread)
        _, xii = s[softmax].split(xi, factor=4)
        s[softmax].vectorize(xii)
        s[softmax].bind(xo, thread_x)
        s[softmax].bind(softmax.op.axis[0], block_x)
        # (3) expsum
        k = expsum.op.reduce_axis[0]
        ko, _ = s[expsum].split(k, nparts=num_thread)
        s[expsum].bind(ko, thread_x)
        s[expsum].compute_at(s[softmax], xo)
        # (2) exp
        if exp is not None:
            xo, xi = s[exp].split(exp.op.axis[1], nparts=num_thread)
            _, xii = s[exp].split(xi, factor=4)
            s[exp].vectorize(xii)
            s[exp].bind(xo, thread_x)
            s[exp].compute_at(s[expsum], expsum.op.axis[0])
            s[exp].compute_at(s[softmax], softmax.op.axis[0])
            s[exp].set_scope("warp")
        # (1) max_elem
        k = max_elem.op.reduce_axis[0]
        ko, _ = s[max_elem].split(k, nparts=num_thread)
        s[max_elem].bind(ko, thread_x)
        if exp is not None:
            s[max_elem].compute_at(s[exp], xo)
        else:
            s[max_elem].bind(ko, thread_x)
            s[max_elem].bind(max_elem.op.axis[0], block_x)
    else:
        # Generic GPU schedule: one block per row, cross-thread reduction
        # via rfactor on the expsum stage.
        num_thread = 64
        # Adapt num_thread to device
        if tgt.max_num_threads is not None:
            num_thread = min(tgt.max_num_threads, num_thread)
        block_x = te.thread_axis("blockIdx.x")
        thread_x = te.thread_axis((0, num_thread), "threadIdx.x")
        if exp is not None:
            s[exp].bind(exp.op.axis[0], block_x)
        s[max_elem].bind(max_elem.op.axis[0], block_x)
        k = expsum.op.reduce_axis[0]
        ko, ki = s[expsum].split(k, factor=num_thread)
        EF = s.rfactor(expsum, ki)
        s[expsum].bind(s[expsum].op.axis[0], block_x)
        s[expsum].bind(s[expsum].op.reduce_axis[0], thread_x)
        s[EF].compute_at(s[expsum], s[expsum].op.reduce_axis[0])
        # Only thread 0 writes the final reduced value.
        s[expsum].set_store_predicate(thread_x.var.equal(0))
        tx, xi = s[softmax].split(softmax.op.axis[1], nparts=num_thread)
        s[softmax].bind(softmax.op.axis[0], block_x)
        s[softmax].bind(tx, thread_x)
    return s
def softmax_cudnn(x, axis=-1):
    """Perform softmax on the data using cudnn.

    Thin wrapper delegating to tvm.contrib.cudnn.softmax along *axis*.
    """
    return cudnn.softmax(x, axis)
def schedule_softmax_cudnn(outs):
    """Schedule for softmax cudnn op.

    The cuDNN call is an extern op, so the generic extern schedule applies.
    """
    return generic.schedule_extern(outs)
| 36.13245
| 98
| 0.633981
|
794ca6938bf651dfd12d79298ce89fb0efebe9d2
| 3,369
|
py
|
Python
|
worklog.py
|
soasme/worklog
|
71f9af86c3b66c2165527de044a2a68864c782e6
|
[
"MIT"
] | null | null | null |
worklog.py
|
soasme/worklog
|
71f9af86c3b66c2165527de044a2a68864c782e6
|
[
"MIT"
] | null | null | null |
worklog.py
|
soasme/worklog
|
71f9af86c3b66c2165527de044a2a68864c782e6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
import hashlib
import json
import time
import calendar
from datetime import datetime
from flask import Flask, abort, jsonify, request, session
from flask_sqlalchemy import SQLAlchemy
# Load JSON configuration; path overridable via the WORKLOG_ENV variable.
with open(os.environ.get('WORKLOG_ENV') or '.env') as f:
    env = json.load(f)

# Every key from the env file (including 'account.*') lands in app.config.
app = Flask(__name__)
app.config.update(env)
db = SQLAlchemy(app)
# TZ
# Helpers
def to_ts(dt):
    """Convert a naive UTC datetime to an integer Unix timestamp."""
    time_tuple = dt.timetuple()
    return calendar.timegm(time_tuple)
# Models
class Record(db.Model):
    """A single worklog entry.

    ``tags`` is persisted as one '|'-delimited string; ``serialize``
    splits it back into a list for the JSON API.
    """
    id = db.Column(db.Integer, primary_key=True)
    content = db.Column(db.Text, nullable=False)
    # Pipe-delimited tag list, e.g. "work|urgent".
    tags = db.Column(db.String(127), default='')
    # Stored naive in UTC (datetime.utcnow); exposed as epoch via to_ts().
    created_at = db.Column(db.DateTime(),
                           nullable=False, default=datetime.utcnow)

    def serialize(self):
        """Return a JSON-safe dict representation of this record."""
        return {'id': self.id, 'content': self.content,
                'tags': self.tags.split('|'), 'created_at': to_ts(self.created_at)}
# Views
def _gen_token():
    """Derive the static bearer token: MD5 hex digest of the password.

    NOTE(review): an unsalted MD5 of the raw password is a weak,
    non-expiring credential; consider a random server-side token
    (secrets.token_urlsafe) or at least a keyed HMAC.  Left as-is since
    existing clients hold the current token value.
    """
    return hashlib.md5(env['account.password'].encode('utf8')).hexdigest()
def _valid_auth():
    """Check the Authorization header for 'Bearer <token>'.

    NOTE(review): plain == is not constant-time; hmac.compare_digest would
    avoid a (minor) timing side channel on the token comparison.
    """
    return request.headers.get('Authorization', '') == 'Bearer ' + _gen_token()
def _validate_login(username, password):
    """Return True iff the credentials match the configured account."""
    expected = (env['account.username'], env['account.password'])
    return (username, password) == expected
def _search_records(keywords, tags, limit, offset):
    """Search records by content substring, optionally filtered by tags.

    Returns ``(total_count, page_of_records)`` where the page honours
    *limit*/*offset*.

    Fix: the original unconditionally applied the tags LIKE filter, so the
    common ``tags=None`` case (see get_records) produced the literal
    pattern '%None%' and matched nothing.  The filter is now applied only
    when tags is truthy, mirroring _get_records.
    """
    query = Record.query.filter(Record.content.like('%%%s%%' % keywords))
    if tags:
        query = query.filter(Record.tags.like('%%%s%%' % tags))
    return query.count(), query.limit(limit).offset(offset).all()
def _get_records(tags, offset, limit):
    """Return (total_count, page) of records, optionally filtered by tags."""
    query = Record.query
    if tags:
        query = query.filter(Record.tags.like('%%%s%%' % tags))
    total = query.count()
    page = query.limit(limit).offset(offset).all()
    return total, page
def _add_record(content, tags, **kwargs):
    """Create and persist a Record; unexpected keyword arguments are ignored."""
    new_record = Record(content=content, tags='|'.join(tags))
    db.session.add(new_record)
    db.session.commit()
    return new_record
def _update_record(id, **kwargs):
    """Apply the given field updates to the record with this id and persist.

    NOTE(review): an unknown id yields record=None, so setattr raises
    AttributeError -- confirm callers only pass existing ids.
    """
    record = Record.query.get(id)
    for field, value in kwargs.items():
        setattr(record, field, value)
    db.session.add(record)
    db.session.commit()
    return record
def _delete_record(id):
    """Delete the record with this id; a missing id is a silent no-op."""
    record = Record.query.get(id)
    if record is None:
        return
    db.session.delete(record)
    db.session.commit()
@app.before_request
def require_login():
    """Reject every request lacking a valid bearer token with 401."""
    if not _valid_auth():
        abort(401)
@app.route('/')
def index():
    """Trivial landing / health-check endpoint."""
    return 'Hello world'
@app.route('/api/1/records')
def get_records():
    """List records, paginated; optional ?keyword= and ?tags= filters.

    Fix: _search_records is declared as ``(keywords, tags, limit, offset)``
    but was called positionally with ``(keyword, tags, offset, limit)``,
    silently transposing the page size and the page start.  The arguments
    are now passed by keyword so they cannot be swapped again.
    """
    keyword, tags = request.args.get('keyword'), request.args.get('tags')
    offset = request.args.get('offset', type=int, default=0)
    limit = request.args.get('limit', type=int, default=20)
    if keyword:
        n, records = _search_records(keyword, tags, limit=limit, offset=offset)
    else:
        n, records = _get_records(tags, offset, limit)
    return jsonify(msg="OK", data={
        'records': [r.serialize() for r in records],
        'count': n})
@app.route('/api/1/records', methods=['POST'])
def add_record():
    """Create a record from the JSON body ({'content': str, 'tags': [str]})."""
    record = _add_record(**request.get_json())
    return jsonify(msg="OK", data={'record': record.serialize()})
@app.route('/api/1/records/<int:id>', methods=['PUT'])
def update_record(id):
    """Update fields of an existing record from the JSON body.

    NOTE(review): an unknown id currently surfaces as a 500 (None record in
    _update_record); a 404 would be friendlier -- confirm intended behavior.
    """
    record = _update_record(id, **request.get_json())
    return jsonify(msg="OK", data={'record': record.serialize()})
@app.route('/api/1/records/<int:id>', methods=['DELETE'])
def delete_record(id):
    """Delete a record by id; deleting a missing id still returns OK."""
    _delete_record(id)
    return jsonify(msg="OK")
# RUN
# Create tables at import time (idempotent).  No app.run() here -- the app
# is served by an external WSGI runner (e.g. `flask run` or gunicorn).
db.create_all()
| 26.527559
| 83
| 0.655387
|
794ca834bca8dff8357048033ecb3a599ebf73ff
| 848
|
py
|
Python
|
demos/chat/aiohttpdemo_chat/main.py
|
Ixyk-Wolf/aiohttp-demos
|
e26ef202e6fd4759f4c77f44cdbdbec482196b41
|
[
"Apache-2.0"
] | 649
|
2017-10-27T10:55:59.000Z
|
2022-03-29T07:14:09.000Z
|
demos/chat/aiohttpdemo_chat/main.py
|
Ixyk-Wolf/aiohttp-demos
|
e26ef202e6fd4759f4c77f44cdbdbec482196b41
|
[
"Apache-2.0"
] | 87
|
2017-10-27T11:12:06.000Z
|
2021-08-17T18:36:59.000Z
|
demos/chat/aiohttpdemo_chat/main.py
|
Ixyk-Wolf/aiohttp-demos
|
e26ef202e6fd4759f4c77f44cdbdbec482196b41
|
[
"Apache-2.0"
] | 284
|
2017-11-05T13:24:51.000Z
|
2022-03-12T03:37:55.000Z
|
import logging
import jinja2
import aiohttp_jinja2
from aiohttp import web
from aiohttpdemo_chat.views import index
async def init_app():
    """Build the chat web.Application.

    Sets up jinja2 templating, the '/' route, and a shared
    ``app['websockets']`` registry that shutdown() closes on exit.
    """
    app = web.Application()
    # Registry of connected clients; drained by shutdown().
    app['websockets'] = {}
    app.on_shutdown.append(shutdown)
    aiohttp_jinja2.setup(
        app, loader=jinja2.PackageLoader('aiohttpdemo_chat', 'templates'))
    app.router.add_get('/', index)
    return app
async def shutdown(app):
    """Close every tracked websocket and drop all of them from the registry."""
    sockets = app['websockets']
    for conn in list(sockets.values()):
        await conn.close()
    sockets.clear()
async def get_app():
    """Used by aiohttp-devtools for local development."""
    # Debug toolbar is a dev-only dependency, so it is imported lazily here.
    import aiohttp_debugtoolbar
    application = await init_app()
    aiohttp_debugtoolbar.setup(application)
    return application
def main():
    """Configure verbose logging and serve the app with aiohttp's runner."""
    logging.basicConfig(level=logging.DEBUG)
    # web.run_app accepts the coroutine returned by init_app() directly.
    web.run_app(init_app())
# Allow `python main.py` to start the chat server directly.
if __name__ == '__main__':
    main()
| 17.306122
| 74
| 0.676887
|
794ca857cd1763c408da14b6e892798e39582bf3
| 8,497
|
py
|
Python
|
telethon/events/inlinequery.py
|
rotem443/Telethon
|
35ba9848d9126462b6d51a35d3e16762b18660a9
|
[
"MIT"
] | null | null | null |
telethon/events/inlinequery.py
|
rotem443/Telethon
|
35ba9848d9126462b6d51a35d3e16762b18660a9
|
[
"MIT"
] | null | null | null |
telethon/events/inlinequery.py
|
rotem443/Telethon
|
35ba9848d9126462b6d51a35d3e16762b18660a9
|
[
"MIT"
] | null | null | null |
import inspect
import re
import asyncio
from .common import EventBuilder, EventCommon, name_inner_event
from .. import utils
from ..tl import types, functions, custom
from ..tl.custom.sendergetter import SenderGetter
@name_inner_event
class InlineQuery(EventBuilder):
    """
    Occurs whenever you sign in as a bot and a user
    sends an inline query such as ``@bot query``.
    Args:
        users (`entity`, optional):
            May be one or more entities (username/peer/etc.), preferably IDs.
            By default, only inline queries from these users will be handled.
        blacklist_users (`bool`, optional):
            Whether to treat the users as a blacklist instead of
            as a whitelist (default). This means that every chat
            will be handled *except* those specified in ``users``
            which will be ignored if ``blacklist_users=True``.
        pattern (`str`, `callable`, `Pattern`, optional):
            If set, only queries matching this pattern will be handled.
            You can specify a regex-like string which will be matched
            against the message, a callable function that returns ``True``
            if a message is acceptable, or a compiled regex pattern.
    """
    def __init__(
            self, users=None, *, blacklist_users=False, func=None, pattern=None):
        super().__init__(users, blacklist_chats=blacklist_users, func=func)
        # Normalize `pattern` into either None or a callable returning a match.
        if isinstance(pattern, str):
            self.pattern = re.compile(pattern).match
        elif not pattern or callable(pattern):
            self.pattern = pattern
        elif hasattr(pattern, 'match') and callable(pattern.match):
            self.pattern = pattern.match
        else:
            raise TypeError('Invalid pattern type given')
    @classmethod
    def build(cls, update):
        # Only raw bot-inline-query updates produce this event type.
        if isinstance(update, types.UpdateBotInlineQuery):
            event = cls.Event(update)
        else:
            return
        event._entities = update._entities
        return event
    def filter(self, event):
        # A failing pattern rejects the event; a successful match is attached
        # to the event so handlers can use the capture groups.
        if self.pattern:
            match = self.pattern(event.text)
            if not match:
                return
            event.pattern_match = match
        return super().filter(event)
    class Event(EventCommon, SenderGetter):
        """
        Represents the event of a new callback query.
        Members:
            query (:tl:`UpdateBotCallbackQuery`):
                The original :tl:`UpdateBotCallbackQuery`.
                Make sure to access the `text` of the query if
                that's what you want instead working with this.
            pattern_match (`obj`, optional):
                The resulting object from calling the passed ``pattern``
                function, which is ``re.compile(...).match`` by default.
        """
        def __init__(self, query):
            # The "chat" of an inline query is the private user who sent it.
            super().__init__(chat_peer=types.PeerUser(query.user_id))
            SenderGetter.__init__(self, query.user_id)
            self.query = query
            self.pattern_match = None
            self._answered = False
        def _set_client(self, client):
            # Resolve the sender pair from the entities cached on the update.
            super()._set_client(client)
            self._sender, self._input_sender = utils._get_entity_pair(
                self.sender_id, self._entities, client._entity_cache)
        @property
        def id(self):
            """
            Returns the unique identifier for the query ID.
            """
            return self.query.query_id
        @property
        def text(self):
            """
            Returns the text the user used to make the inline query.
            """
            return self.query.query
        @property
        def offset(self):
            """
            The string the user's client used as an offset for the query.
            This will either be empty or equal to offsets passed to `answer`.
            """
            return self.query.offset
        @property
        def geo(self):
            """
            If the user location is requested when using inline mode
            and the user's device is able to send it, this will return
            the :tl:`GeoPoint` with the position of the user.
            """
            # NOTE(review): bare return — this property always yields None;
            # confirm whether `return self.query.geo` was intended.
            return
        @property
        def builder(self):
            """
            Returns a new `InlineBuilder
            <telethon.tl.custom.inlinebuilder.InlineBuilder>` instance.
            """
            return custom.InlineBuilder(self._client)
        async def answer(
                self, results=None, cache_time=0, *,
                gallery=False, next_offset=None, private=False,
                switch_pm=None, switch_pm_param=''):
            """
            Answers the inline query with the given results.
            Args:
                results (`list`, optional):
                    A list of :tl:`InputBotInlineResult` to use.
                    You should use `builder` to create these:
                    .. code-block:: python
                        builder = inline.builder
                        r1 = builder.article('Be nice', text='Have a nice day')
                        r2 = builder.article('Be bad', text="I don't like you")
                        await inline.answer([r1, r2])
                    You can send up to 50 results as documented in
                    https://core.telegram.org/bots/api#answerinlinequery.
                    Sending more will raise ``ResultsTooMuchError``,
                    and you should consider using `next_offset` to
                    paginate them.
                cache_time (`int`, optional):
                    For how long this result should be cached on
                    the user's client. Defaults to 0 for no cache.
                gallery (`bool`, optional):
                    Whether the results should show as a gallery (grid) or not.
                next_offset (`str`, optional):
                    The offset the client will send when the user scrolls the
                    results and it repeats the request.
                private (`bool`, optional):
                    Whether the results should be cached by Telegram
                    (not private) or by the user's client (private).
                switch_pm (`str`, optional):
                    If set, this text will be shown in the results
                    to allow the user to switch to private messages.
                switch_pm_param (`str`, optional):
                    Optional parameter to start the bot with if
                    `switch_pm` was used.
            Example:
                .. code-block:: python
                    @bot.on(events.InlineQuery)
                    async def handler(event):
                        builder = event.builder
                        rev_text = event.text[::-1]
                        await event.answer([
                            builder.article('Reverse text', text=rev_text),
                            builder.photo('/path/to/photo.jpg')
                        ])
            """
            # NOTE(review): _answered is only read here and never set in this
            # class — confirm it is managed elsewhere before relying on it.
            if self._answered:
                return
            if results:
                futures = [self._as_future(x, self._client.loop)
                           for x in results]
                # NOTE(review): the explicit loop= argument to asyncio.wait /
                # ensure_future was deprecated in Python 3.8 and removed in
                # 3.10 — verify the interpreter range this must support.
                await asyncio.wait(futures, loop=self._client.loop)
                # All futures will be in the `done` *set* that `wait` returns.
                #
                # Precisely because it's a `set` and not a `list`, it
                # will not preserve the order, but since all futures
                # completed we can use our original, ordered `list`.
                results = [x.result() for x in futures]
            else:
                results = []
            if switch_pm:
                switch_pm = types.InlineBotSwitchPM(switch_pm, switch_pm_param)
            return await self._client(
                functions.messages.SetInlineBotResultsRequest(
                    query_id=self.query.query_id,
                    results=results,
                    cache_time=cache_time,
                    gallery=gallery,
                    next_offset=next_offset,
                    private=private,
                    switch_pm=switch_pm
                )
            )
        @staticmethod
        def _as_future(obj, loop):
            # Wrap awaitables and plain values uniformly into futures so the
            # caller can asyncio.wait on a homogeneous collection.
            if inspect.isawaitable(obj):
                return asyncio.ensure_future(obj, loop=loop)
            f = loop.create_future()
            f.set_result(obj)
            return f
| 36.004237
| 81
| 0.538072
|
794ca8b736673cf635b9e615795cf40656e6fdd3
| 1,030
|
py
|
Python
|
tools/print_config.py
|
tianyuluan/mmdet
|
f9330c4dfd07bf8f71d058125e9cf7dfb6fa9536
|
[
"Apache-2.0"
] | null | null | null |
tools/print_config.py
|
tianyuluan/mmdet
|
f9330c4dfd07bf8f71d058125e9cf7dfb6fa9536
|
[
"Apache-2.0"
] | null | null | null |
tools/print_config.py
|
tianyuluan/mmdet
|
f9330c4dfd07bf8f71d058125e9cf7dfb6fa9536
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
'''
Author: luantianyu
LastEditors: Luan Tianyu
email: 1558747541@qq.com
github: https://github.com/tianyuluan/
Date: 2021-09-13 19:04:07
LastEditTime: 2021-11-30 22:02:10
motto: Still water run deep
Description: Modify here please
FilePath: /mmdetection/tools/print_config.py
'''
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
from mmcv import Config, DictAction
import os
def parse_args():
    """Parse the CLI: a positional config path plus optional dict overrides."""
    parser = argparse.ArgumentParser(description='Print the whole config')
    parser.add_argument('config', help='config file path')
    parser.add_argument(
        '--options', nargs='+', action=DictAction, help='arguments in dict')
    return parser.parse_args()
def main():
    # Load the config file, apply any --options overrides, dump and print it.
    args = parse_args()
    cfg = Config.fromfile(args.config)
    if args.options is not None:
        cfg.merge_from_dict(args.options)
    # NOTE(review): hard-coded, machine-specific output path — parameterize
    # this before reusing the script outside the original author's machine.
    cfg.dump('/home/lty/lty/mmdetection/work_dirs/retinanet.py')
    print(f'Config:\n{cfg.pretty_text}')
# Script entry point.
if __name__ == '__main__':
    main()
| 25.121951
| 76
| 0.713592
|
794ca97b65c971e3b8b2d205e449afadc3081690
| 660
|
py
|
Python
|
manage.py
|
morrisonGithinji/projectile
|
bef3cf66841ca52f51bfd91d30e0a426167f3d41
|
[
"MIT"
] | 1
|
2020-11-19T15:09:23.000Z
|
2020-11-19T15:09:23.000Z
|
manage.py
|
morrisonGithinji/projectile
|
bef3cf66841ca52f51bfd91d30e0a426167f3d41
|
[
"MIT"
] | null | null | null |
manage.py
|
morrisonGithinji/projectile
|
bef3cf66841ca52f51bfd91d30e0a426167f3d41
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks via Django's command-line dispatcher."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'rate.settings')
    try:
        from django.core.management import execute_from_command_line as run_cli
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    run_cli(sys.argv)
# Standard Django manage.py entry point.
if __name__ == '__main__':
    main()
| 28.695652
| 73
| 0.677273
|
794caaded2dc6c67105321e092ac31f1dd912661
| 4,958
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/containerservice/latest/list_managed_cluster_access_profile.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 31
|
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_nextgen/containerservice/latest/list_managed_cluster_access_profile.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 231
|
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_nextgen/containerservice/latest/list_managed_cluster_access_profile.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 4
|
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
# Public API of this generated module.
__all__ = [
    'ListManagedClusterAccessProfileResult',
    'AwaitableListManagedClusterAccessProfileResult',
    'list_managed_cluster_access_profile',
]
# Warn once at import time: the 'latest' namespace is deprecated upstream.
warnings.warn("""The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:containerservice:listManagedClusterAccessProfile'.""", DeprecationWarning)
# Auto-generated by the Pulumi SDK generator (see file header) — keep manual
# edits to comments only.
@pulumi.output_type
class ListManagedClusterAccessProfileResult:
    """
    Managed cluster Access Profile.
    """
    def __init__(__self__, id=None, kube_config=None, location=None, name=None, tags=None, type=None):
        # Validate the loosely-typed invoke payload before storing each field.
        # NOTE(review): falsy values (None, "") skip the isinstance check —
        # this mirrors the generator's convention for optional fields.
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if kube_config and not isinstance(kube_config, str):
            raise TypeError("Expected argument 'kube_config' to be a str")
        pulumi.set(__self__, "kube_config", kube_config)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource Id
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="kubeConfig")
    def kube_config(self) -> Optional[str]:
        """
        Base64-encoded Kubernetes configuration file.
        """
        return pulumi.get(self, "kube_config")
    @property
    @pulumi.getter
    def location(self) -> str:
        """
        Resource location
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type
        """
        return pulumi.get(self, "type")
class AwaitableListManagedClusterAccessProfileResult(ListManagedClusterAccessProfileResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` turns __await__ into a generator without
        # ever suspending, so `await result` completes immediately with a
        # plain (non-awaitable) copy of the result.
        if False:
            yield self
        return ListManagedClusterAccessProfileResult(
            id=self.id,
            kube_config=self.kube_config,
            location=self.location,
            name=self.name,
            tags=self.tags,
            type=self.type)
def list_managed_cluster_access_profile(resource_group_name: Optional[str] = None,
                                        resource_name: Optional[str] = None,
                                        role_name: Optional[str] = None,
                                        opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListManagedClusterAccessProfileResult:
    """
    Managed cluster Access Profile.
    Latest API Version: 2020-03-01.
    :param str resource_group_name: The name of the resource group.
    :param str resource_name: The name of the managed cluster resource.
    :param str role_name: The name of the role for managed cluster accessProfile resource.
    """
    # Deprecation is re-announced on every call, not just at import.
    pulumi.log.warn("list_managed_cluster_access_profile is deprecated: The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:containerservice:listManagedClusterAccessProfile'.")
    # Build the invoke arguments with the provider's camelCase keys.
    __args__ = dict()
    __args__['resourceGroupName'] = resource_group_name
    __args__['resourceName'] = resource_name
    __args__['roleName'] = role_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-nextgen:containerservice/latest:listManagedClusterAccessProfile', __args__, opts=opts, typ=ListManagedClusterAccessProfileResult).value
    return AwaitableListManagedClusterAccessProfileResult(
        id=__ret__.id,
        kube_config=__ret__.kube_config,
        location=__ret__.location,
        name=__ret__.name,
        tags=__ret__.tags,
        type=__ret__.type)
| 35.927536
| 231
| 0.651271
|
794cab8d690cdcf3ae36831a6e965eeb0f31537b
| 11,230
|
py
|
Python
|
python/scripts/hpx_optsweep.py
|
alexmyczko/hpx
|
fd68e000ead9a03a6b5bad49cf4905df1c0b78c4
|
[
"BSL-1.0"
] | 1
|
2019-08-17T21:18:03.000Z
|
2019-08-17T21:18:03.000Z
|
python/scripts/hpx_optsweep.py
|
alexmyczko/hpx
|
fd68e000ead9a03a6b5bad49cf4905df1c0b78c4
|
[
"BSL-1.0"
] | 1
|
2017-07-24T07:16:26.000Z
|
2017-07-24T08:03:33.000Z
|
python/scripts/hpx_optsweep.py
|
biddisco/hpx
|
2d244e1e27c6e014189a6cd59c474643b31fad4b
|
[
"BSL-1.0"
] | null | null | null |
#! /usr/bin/env python
#
# Copyright (c) 2009 Maciej Brodowicz
# Copyright (c) 2011 Bryce Lelbach
# Copyright (c) 2019 Patrick Diehl
#
# Distributed under the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
"""
Perform parameter sweep runs of an application.
"""
import sys, os, getopt, time, string
import os.path as osp
from re import compile
from types import *
from operator import *
from datetime import datetime
from pickle import dump
# Make the bundled `hpx` package importable whether the script runs from the
# source tree or from an installed prefix.
if osp.exists(osp.join(sys.path[0], "../hpx")):
    sys.path.append(osp.join(sys.path[0], ".."))
if osp.exists(osp.join(sys.path[0], "../share/hpx/python/hpx")):
    sys.path.append(osp.join(sys.path[0], "../share/hpx/python"))
from hpx.process import process
OPTSWEEP_VERSION = 0x10 # version (mostly for version tracking in pickle output)
# print usage info and exit with an error code
def usage(rc = 2):
    """Print the command-line help text and terminate with exit code *rc*."""
    print ('\nUsage:', sys.argv[0], '[options] application [const_options]',)
    print ('''
Options:
 -a name,list : specify range of values, identified by "name", for a single
                option of the application;
                "list" is a python expression producing list of values
 -n           : don\'t stream results to stdout
 -r number    : repeat each test "number" of times
 -o filename  : capture stdout and stderr to file "filename"
 -t filename  : save results to file "filename" in the pickle format
 -d number    : delay test start by "number" of seconds
 -x list      : exclude cases with argument tuples matching any item in the
                "list" (python expression)
 -w seconds   : kill runs that take longer than "seconds" to complete (default
                360).
 -b command   : run preprocessing "command" before starting test sequence for
                each configuration, applying option substitution
 -p command   : run postprocessing "command" after test sequence for each
                configuration, applying option substitution
 -h           : prints this message
''')
    sys.exit(rc)
# write string to each open file descriptor in the list
def writeres(s, fdlist):
    """Write *s* to every stream in *fdlist*, flushing each one and
    fsync-ing real files (anything that is not stdout, fd 1)."""
    for stream in fdlist:
        stream.write(s)
        stream.flush()
        fd = stream.fileno()
        if fd != 1:
            os.fsync(fd)
# select next option set to run
def next(ixd, opts, optv):
    """Advance the per-option index dict *ixd* like an odometer over the
    value lists in *optv* (keyed by *opts*).

    Returns the updated dict, or None once every combination has been
    produced (or when *ixd* is empty/None).
    NOTE: deliberately shadows the builtin ``next`` — callers in this
    script depend on the name.
    """
    if not ixd:
        return None
    for key in opts:
        ixd[key] += 1
        if ixd[key] < len(optv[key]):
            return ixd
        ixd[key] = 0
    return None
# run the application and optionally capture its output and error streams
def run(cmd, outfl = None, timeout = 360):
    """Launch *cmd* via hpx.process, wait up to *timeout* seconds, and stream
    any captured output to the descriptors in *outfl* (when given).

    Returns a ``(returncode, elapsed)`` pair; *elapsed* is a timedelta.
    """
    start = datetime.now()
    proc = process(cmd)
    (timed_out, returncode) = proc.wait(timeout)
    # Elapsed time is measured before draining output, so it reflects the
    # process runtime rather than the streaming below.
    now = datetime.now()
    # Drain whatever the process wrote; an empty read ends the loop.
    while outfl:
        s = proc.read()
        if s: writeres(s, outfl)
        else: break
    if timed_out:
        writeres('Command timed out.\n', outfl)
    return (returncode, now - start)
# wrapper for conversion of integer options
def intopt(opta, optn):
    """Parse option argument *opta* as an int; on failure report the
    offending option *optn* and exit through usage()."""
    try:
        return int(opta)
    except Exception as err:
        print(f'Error: invalid argument to option "{optn}": {opta} ({err})')
        usage()
# human-readable version of current timestamp
def timestr(t):
    """Format datetime *t* as ``YYYY-MM-DD HH:MM:SS``."""
    return format(t, '%Y-%m-%d %H:%M:%S')
# quote option arguments to protect blanks
def quoteopts(olist, qchar='"'):
    """Join *olist* into a command-line string, wrapping any element that
    contains a character outside the module-level safe set ``nonquot`` in
    *qchar* quotes.

    Each element is preceded by a single space (matching the original
    output format); non-string elements are stringified first.
    """
    s = ''
    for o in olist:
        # BUG FIX: the original tested `type(o) is not StringType`; the
        # Python 2 `types.StringType` no longer exists, so every call
        # raised NameError under Python 3.
        if not isinstance(o, str):
            o = str(o)
        if all(c in nonquot for c in o):
            s += ' ' + o
        else:
            s += ' ' + qchar + o + qchar
    return s
# create separator with centered string
def sepstr(sepch='-', s=''):
    """Return a separator line of width ``seplen`` (module global) with *s*
    centered in it; oversized strings still get at least 3 separator
    characters on each side."""
    if s:
        s = ' ' + s.strip() + ' '
    # BUG FIX: the original used true division (`/`), which yields a float
    # in Python 3 and made `nl*sepch` raise TypeError (float * str).
    # Floor division restores the original Python 2 integer semantics.
    nl = (seplen - len(s)) // 2
    nr = seplen - len(s) - nl
    # make sure it still looks like separator for oversized lines
    if nl < 3:
        nl = 3
    if nr < 3:
        nr = 3
    return nl * sepch + s + nr * sepch
# substitute all option ids in string with formatting keys
def optidsub(optids, s):
    """Rewrite each option id occurring in *s* as a %-format key,
    e.g. ``'x'`` becomes ``'%(x)s'``."""
    for ident in optids:
        s = s.replace(ident, '%(' + ident + ')s')
    return s
# run pre- or postprocessor
def runscript(cmdlst, options, ofhs, timeout=360):
    """Run each command template in *cmdlst* after %-substituting *options*;
    non-zero exit codes are reported to the streams in *ofhs*.

    BUG FIXES: *timeout* now defaults to 360 — several call sites in this
    script invoke runscript() with only three arguments, which previously
    raised TypeError — and it is forwarded to run() by keyword, whereas
    `run(scr, timeout)` used to land the timeout in run()'s *outfl*
    positional slot (silently dropping the timeout and making run() try to
    stream output into an integer).
    """
    for cmd in cmdlst:
        scr = cmd % options
        rc, walltime = run(scr, timeout=timeout)
        if rc:
            writeres('Warning: command: "'+scr+'" returned '+str(rc)+'\n', ofhs)
if __name__ == '__main__':
    # parse command line
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'a:b:d:hno:p:r:t:x:w:i:')
    except getopt.GetoptError as err:
        print ('Error:', str(err))
        usage()
    # option value lists, option names, # test repetitions, temporal pad
    options, optnames, nrep, tpad = {}, [], 1, 0
    # stdout usage flag, result file name, list of output file descriptors
    stdoutf, ofile, rfile, rf, ofhs = True, None, None, None, []
    # exclusion list, preprocessing command list, postprocessing command list
    excl, before, after = [], [], []
    # execution counters: app. runs, unique configs, errors, excluded configs
    runs, configs, erruns, excnt = 0, 0, 0, 0
    # separator length for pretty printing
    seplen = 78
    # timeout
    timeout = 360
    # non-quotable characters
    nonquot = string.ascii_letters+string.digits+'-+='
    # process options
    for o, a in opts:
        if o == '-a':
            wl = a.split(',', 1)
            if len(wl) != 2:
                print ('Error: malformed argument to "-a" option:', a)
                usage()
            try:
                # NOTE(review): eval of user-supplied text — acceptable for a
                # local benchmarking tool, but never expose to untrusted input.
                options[wl[0]] = eval(wl[1])
            except Exception as err:
                print ('Error: failed to evaluate "'+wl[1]+'", check syntax')
                usage()
            # NOTE(review): ListType/TupleType came from the Python 2 `types`
            # module; under Python 3 they are undefined and this raises
            # NameError — should be `(list, tuple)`.
            if type(options[wl[0]]) not in (ListType, TupleType):
                # NOTE(review): `options[wl[1]]` looks like a typo — wrapping
                # a scalar presumably meant `(options[wl[0]],)`. Confirm.
                options[wl[0]] = (options[wl[1]],)
            if not len(options[wl[0]]):
                print ('Error: empty value list for option "'+wl[0]+'"')
                usage()
            optnames.append(wl[0])
            if len(options[wl[0]]) == 1:
                print ('Warning: single value for option "'+wl[0]+'":', options[wl[0]])
        elif o == '-n': stdoutf = False
        elif o == '-d': tpad = intopt(a, o)
        elif o == '-r': nrep = intopt(a, o)
        elif o == '-o': ofile = a
        elif o == '-t': rfile = a
        elif o == '-w': timeout = intopt(a, o)
        elif o == '-x':
            try:
                # NOTE(review): py3 `map` is a one-shot iterator — the
                # repeated `in excl` membership tests below consume it;
                # should be `list(map(tuple, eval(a)))` when porting.
                excl = map(tuple, eval(a))
            except Exception as err:
                print ('Error: invalid exclusion list: ', str(a))
                usage()
        elif o == '-b': before += [a]
        elif o == '-p': after += [a]
        elif o == '-h': usage(0)
    if not args:
        print ('Error: no test application specified')
        usage()
    if ofile:
        try:
            of = open(ofile, 'w')
            ofhs.append(of)
        except Exception as err:
            print ('Error: failed to open output file "'+ofile+'"')
            sys.exit(1)
    if rfile:
        try:
            rf = open(rfile, 'w')
        except Exception as err:
            print ('Error: failed to open result file "'+rfile+'"')
            sys.exit(1)
    if stdoutf: ofhs.append(sys.stdout)
    # form prototypes of application command line, pre- and postprocessor
    # NOTE(review): these `map` objects are lazy one-shot iterators in py3 —
    # `cmdproto` is consumed on the first configuration and yields nothing
    # afterwards; wrap each in list(...) when porting.
    cmdproto = map(lambda o: optidsub(optnames, o), args)
    if before: before = map(lambda o: optidsub(optnames, o), before)
    if after: after = map(lambda o: optidsub(optnames, o), after)
    # initialize current option index dictionary
    results = {}
    optix = {}
    for k in options: optix[k] = 0
    start_date = datetime.now()
    # beginning banner
    writeres(sepstr('=')+'\n', ofhs)
    writeres('Start date: '+timestr(start_date)+'\n', ofhs)
    writeres('Command:'+quoteopts(sys.argv)+'\n', ofhs)
    if rf:
        results['data'] = {}
        results['header'] = {}
        results['schema'] = {}
        results['schema']['keys'] = tuple(optnames)
        results['schema']['values'] = ('wall_time','return_code')
        results['header']['version'] = OPTSWEEP_VERSION
        results['header']['start_date'] = start_date
        results['header']['command'] = tuple(sys.argv)
    try:
        # test loop
        while optix != None:
            configs += 1
            # create current instance of generated options
            vallst, optd = [], {}
            for k in optnames:
                val = options[k][optix[k]]
                # NOTE(review): py2 `StringType` again — NameError under py3;
                # should be `isinstance(val, str)`.
                if type(val) is not StringType: val = str(val)
                optd[k] = val
                vallst += [optd[k]]
            # check for exclusions
            if tuple(vallst) in excl:
                # NOTE(review): `cmd` is unbound here on the first excluded
                # configuration (it is only assigned further below).
                writeres(sepstr('=')+'\nSkipping:'+quoteopts(cmd)+'\n', ofhs)
                optix = next(optix, optnames, options)
                excnt += 1
                continue
            # run setup program
            # TODO: add timeout options
            if before: runscript(before, optd, ofhs)
            # build command line
            # NOTE(review): `len(cmd)` / `cmd[e]` below require a list; a
            # py3 map object raises TypeError here.
            cmd = map(lambda x: x%optd, cmdproto)
            # second pass - eval
            # `compile` is re.compile (imported via `from re import compile`).
            p = compile(r'eval\("([^"]*)"\)')
            for e in range(len(cmd)):
                while p.search(cmd[e]):
                    ss = p.search(cmd[e]).expand(r'\1')
                    cmd[e] = cmd[e].replace("eval(\"%s\")" % ss, str(eval(ss)))
            writeres(sepstr('=')+'\nExecuting:'+quoteopts(cmd)+'\n', ofhs)
            # run test requested number of times
            for i in range(nrep):
                start = datetime.now()
                txt = 'BEGIN RUN '+str(i+1)+' @ '+timestr(datetime.now())
                writeres(sepstr('-', txt)+'\n', ofhs)
                (rc, walltime) = run(cmd, ofhs, timeout)
                txt = 'END RUN '+str(i+1)+' @ '+timestr(datetime.now())
                txt += ' (ELAPSED '+str(datetime.now()-start)+')'
                runs += 1
                if rc: erruns += 1
                outs = sepstr('-', txt)
                outs += '\nReturn code: '+str(rc)+'\n'+sepstr()+'\n'
                writeres(outs, ofhs)
                if rf:
                    # NOTE(review): dict.has_key was removed in py3 — use
                    # `tuple(vallst) not in results['data']`.
                    if not results['data'].has_key(tuple(vallst)):
                        results['data'][tuple(vallst)] = [(walltime, rc)]
                    else:
                        results['data'][tuple(vallst)].append((walltime, rc))
                time.sleep(tpad)
            # run postprocessor
            # TODO: add timeout options
            if after: runscript(after, optd, ofhs)
            optix = next(optix, optnames, options)
    except:
        # NOTE(review): bare except appears intentional — it catches any
        # failure (including KeyboardInterrupt) so the summary still prints.
        from traceback import print_exc
        print_exc()
    end_date = datetime.now()
    # final banner
    writeres('='*seplen+'\n', ofhs)
    writeres('End date: '+timestr(end_date)+'\n', ofhs)
    writeres('Configurations: '+str(configs)+'\n', ofhs)
    writeres('Exclusions: '+str(excnt)+'\n', ofhs)
    writeres('Total runs: '+str(runs)+'\n', ofhs)
    writeres('Failed runs: '+str(erruns)+'\n', ofhs)
    writeres('='*seplen+'\n', ofhs)
    if rf:
        results['header']['end_date'] = end_date
        results['header']['configurations'] = configs
        results['header']['exclusions'] = excnt
        results['header']['total_runs'] = runs
        results['header']['failed_runs'] = erruns
        # dump the results dictionary to the result file, using
        # pickle protocol version 2 with binary output
        dump(results, rf, 2)
        rf.close()
    # cleanup
    for f in ofhs:
        if f != sys.stdout: f.close()
| 87
| 0.566607
|
794cab9ad763aa4abf91fed21e21dc3b43a47d2c
| 20,562
|
py
|
Python
|
qradar/utils/qradar_cef_generator.py
|
sahil2303/ta_cloud_exchange_plugins
|
931299ed317ea12968ce53edd7bf4318d23c1e3e
|
[
"BSD-3-Clause"
] | null | null | null |
qradar/utils/qradar_cef_generator.py
|
sahil2303/ta_cloud_exchange_plugins
|
931299ed317ea12968ce53edd7bf4318d23c1e3e
|
[
"BSD-3-Clause"
] | null | null | null |
qradar/utils/qradar_cef_generator.py
|
sahil2303/ta_cloud_exchange_plugins
|
931299ed317ea12968ce53edd7bf4318d23c1e3e
|
[
"BSD-3-Clause"
] | null | null | null |
"""
BSD 3-Clause License
Copyright (c) 2021, Netskope OSS
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""QRadar Plugin."""
import io
import collections
import csv
import datetime
import datetime as dt
import re
import socket
import time
from .qradar_constants import (
SEVERITY_MAP,
SEVERITY_UNKNOWN,
)
from .qradar_exceptions import (
CEFValueError,
CEFTypeError,
)
class CEFGenerator(object):
"""CEF Generator class."""
def __init__(self, extensions, delimiter, cef_version, logger):
"""Init method."""
self.logger = logger
self.cef_version = cef_version # Version of CEF being used
self.extensions = extensions # CSV string having information of all the available CEF fields
self.extension = collections.namedtuple(
"Extension", ("key_name", "sanitizer")
)
self.extension_converter = collections.namedtuple(
"Extension", ("key_name", "converter")
)
self._prefix_field_str_sanitizer = self.str_sanitizer(
"[^\r\n]*", escape_chars=delimiter
)
self._prefix_field_float_sanitizer = self.float_sanitizer()
self._equals_escaper = self.escaper("=")
self._severity_sanitizer = self.str_sanitizer(
"Unknown|Low|Medium|High|Very-High"
)
self.valid_extensions = self._valid_extensions()
self.extension_converters = self._type_converter()
self.delimiter = delimiter
def escaper(self, special_chars):
"""Escapes the given special characters.
Args:
special_chars: The special characters to be escaped
Returns:
Escaped special characters
"""
strip_escaped_re = re.compile(r"\\([{}\\])".format(special_chars))
do_escape_re = re.compile(r"([{}\\])".format(special_chars))
def escape(s):
stripped = strip_escaped_re.sub(r"\1", s)
return do_escape_re.sub(r"\\\1", stripped)
return escape
def ensure_in_range(self, debug_name, min, max, num):
"""To Check whether the given value is in given range or not.
Args:
debug_name: The human readable name of the value being verified
min: Min value of threshold
max: Max value of threshold
num: The value to be verified
Raises:
CEFValueError in case of value is not in given threshold
Returns:
Escaped special characters
"""
if max is None:
if min is not None and num < min:
raise CEFValueError(
"{}: {} less than {}".format(debug_name, num, min)
)
elif min is None:
if max is not None and num > max:
raise CEFValueError(
"{}: {} greater than {}".format(debug_name, num, max)
)
elif not min <= num <= max:
raise CEFValueError(
"{}: {} out of range {}-{}".format(debug_name, num, min, max)
)
def int_sanitizer(self, max=None, min=None):
"""Wrap function for ensuring the given value is integer and in given range.
Args:
min: Min value of threshold
max: Max value of threshold
Raises:
CEFTypeError in case of value other than integer
Returns:
Function to sanitize the given integer value
"""
def sanitize(n, debug_name):
if not isinstance(n, int):
raise CEFTypeError(
"{}: Expected int, got {}".format(debug_name, type(n))
)
self.ensure_in_range(debug_name, min, max, n)
return str(n)
return sanitize
def float_sanitizer(self):
"""Wrap function for ensuring the given value is float.
Raises:
CEFTypeError in case of value other than float
Returns:
Function to sanitize the given float value
"""
def sanitize(n, debug_name):
if not isinstance(n, float):
raise CEFTypeError(
"{}: Expected float, got {}".format(debug_name, type(n))
)
else:
return str(n)
return sanitize
def str_sanitizer(
self, regex_str=".*", escape_chars="", min_len=0, max_len=None
):
"""Wrap function for ensuring the given value is string and has specific properties.
Args:
regex_str: The regex to be matched in given string
escape_chars: The characters to be escaped in given string
min_len: The min possible length of given string
max_len: The max possible length of given string
Raises:
CEFTypeError in case of value other than string
Returns:
Function to sanitize the given string
"""
regex = re.compile("^{}$".format(regex_str), re.DOTALL)
escape = self.escaper(escape_chars)
def sanitize(s, debug_name):
if not isinstance(s, str):
raise CEFTypeError(
"{}: Expected str, got {}".format(debug_name, type(s))
)
if not regex.match(s):
raise CEFTypeError(
"{}: {!r} did not match regex {!r}".format(
debug_name, s, regex_str
)
)
s = s.encode("unicode_escape").decode("utf-8")
escaped = escape(s)
if max_len is None and not min_len:
return escaped
byte_len = len(escaped)
if (max_len is None) and (byte_len < min_len):
raise CEFTypeError(
"{}: String shorter than {} bytes".format(
debug_name, min_len
)
)
if (max_len is not None) and not min_len <= byte_len <= max_len:
raise CEFTypeError(
"{}: String length out of range {}-{}".format(
debug_name, min_len, max_len
)
)
return escaped
return sanitize
def datetime_sanitizer(self):
"""Wrap function for ensuring the given value is a valid date time instance.
Raises:
CEFTypeError in case of value other than datetime
Returns:
Function to sanitize the given datetime value
"""
def sanitize(t, debug_name):
if not isinstance(t, dt.datetime):
raise CEFTypeError(
"{}: Expected datetime, got {}".format(debug_name, type(t))
)
else:
return str(t.timestamp()).split(".")[0]
return sanitize
def string_converter(self):
"""Wrap function for converting given value to string.
Raises:
CEFTypeError in case when value is not string compatible
Returns:
Function to convert type of given value to string
"""
def convert(val, debug_name):
try:
return str(val)
except Exception:
raise CEFTypeError(
"{}: Error occurred while converting to string".format(
debug_name
)
)
return convert
def int_converter(self):
"""Wrap function for converting given value to integer.
Raises:
CEFTypeError in case when value is not integer compatible
Returns:
Function to convert type of given value to integer
"""
def convert(val, debug_name):
try:
return int(val)
except Exception:
raise CEFTypeError(
"{}: Error occurred while converting to integer".format(
debug_name
)
)
return convert
def float_converter(self):
"""Wrap function for converting given value to floating point.
Raises:
CEFTypeError in case when value is not float compatible
Returns:
Function to convert type of given value to float
"""
def convert(val, debug_name):
try:
return float(val)
except Exception:
raise CEFTypeError(
"{}: Error occurred while converting to float".format(
debug_name
)
)
return convert
def datetime_converter(self):
"""Wrap function for converting given value to datetime object.
Raises:
CEFTypeError in case when value is not datetime compatible
Returns:
Function to convert type of given value to datetime
"""
def convert(val, debug_name):
try:
return datetime.datetime.fromtimestamp(val)
except Exception as err:
raise CEFTypeError(
"{}: Error occurred while converting to datetime: {}".format(
debug_name, err
)
)
return convert
def _type_converter(self):
"""To Parse the CEF extension CSV string and creates the dict for data type converters.
Returns:
Dict object having details of all the available CEF fields and its type converters
"""
converters = {
"String": self.string_converter(),
"Time Stamp": self.datetime_converter(),
"Integer": self.int_converter(),
"Floating Point": self.float_converter(),
"IPv4 Address": self.string_converter(),
"IPv6 address": self.string_converter(),
"MAC Address": self.string_converter(),
"IP Address": self.string_converter(),
}
# Parse the CSV and create key-converter dict
try:
return {
record["CEF Key Name"]: self.extension_converter(
key_name=record["CEF Key Name"],
converter=converters[record["Data Type"]],
)
for record in csv.DictReader(
io.StringIO(self.extensions), strict=True
)
}
except Exception as err:
self.logger.error(
"Error occurred while parsing CEF validation CSV. Error: {}".format(
str(err)
)
)
raise
    def _valid_extensions(self):
        """Parse the extension CSV string and build a sanitizer for each CEF field.

        Raises:
            Re-raises any CSV parsing error after logging it
        Returns:
            Dict mapping each "CEF Key Name" to an ``extension`` record holding
            the key name and the sanitizer for its declared type and length
        """
        # Initialize the sanitizers for different data types.
        # Full dotted-quad IPv4, each octet range-checked to 0-255.
        ipv4_addr_re = (
            r"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4]"
            r"[0-9]|25[0-5])$"
        )
        ipv4_addr = self.str_sanitizer(ipv4_addr_re)
        ipv6_addr_re = r"\:".join(
            ["[0-9a-fA-F]{1,4}"] * 8
        )  # only complete (uncompressed, 8-group) ipv6 addresses accepted
        ipv6_addr = self.str_sanitizer(ipv6_addr_re)
        # Generic IP sanitizer: either form is acceptable.
        ip_addr = self.str_sanitizer(
            r"(" + ipv6_addr_re + r"|" + ipv4_addr_re + r")"
        )
        mac_addr = self.str_sanitizer(r"\:".join(["[0-9a-fA-F]{2}"] * 6))
        # Maximum byte lengths allowed for CEF string fields.
        str_lens = [31, 40, 63, 100, 128, 200, 255, 1023, 2048, 4000, 8000]
        # Outer key: the CSV "Data Type" column; inner key: its "Length" column.
        sanitizers = {
            "IPv4 Address": {"": ipv4_addr},
            "IPv6 address": {"": ipv6_addr},
            "IP Address": {"": ip_addr},
            "MAC Address": {"": mac_addr},
            "Time Stamp": {"": self.datetime_sanitizer()},
            "Floating Point": {"": self.float_sanitizer()},
            "Integer": {
                "": self.int_sanitizer(),
                "65535": self.int_sanitizer(min=0, max=65535),
            },
            "String": dict(
                [("", self.str_sanitizer())]
                + [
                    (str(str_len), self.str_sanitizer(max_len=str_len))
                    for str_len in str_lens
                ]
            ),
        }
        # Parse the CSV and create the key-sanitizer dict.
        try:
            return {
                record["CEF Key Name"]: self.extension(
                    key_name=record["CEF Key Name"],
                    sanitizer=sanitizers[record["Data Type"]][
                        record["Length"]
                    ],
                )
                for record in csv.DictReader(
                    io.StringIO(self.extensions), strict=True
                )
            }
        except Exception as err:
            self.logger.error(
                "Error occurred while parsing CEF validation CSV. Error: {}".format(
                    str(err)
                )
            )
            raise
def get_header_value(self, header, headers):
"""To Fetch sanitized value of header from given configured headers dict.
Args:
header: The header for which sanitized value is to be fetched
headers: Configured headers
Returns:
Sanitized value
"""
if header == "Severity":
return self._severity_sanitizer(headers[header], header)
return self._prefix_field_str_sanitizer(headers[header], header)
def log_invalid_header(
self, possible_headers, headers, data_type, subtype
):
"""Issues log in case of invalid header found in mappings.
Args:
possible_headers: Possible CEF headers
headers: Configured headers
data_type: Data type for which CEF event is being generated
subtype: Subtype of data type for which CEF event is being generated
"""
for configured_header in list(headers.keys()):
if configured_header not in possible_headers:
self.logger.error(
'[{}][{}]: Found invalid header configured in qradar mapping file: "{}". Header '
"field will be ignored.".format(
data_type, subtype, configured_header
)
)
@staticmethod
def _get_hostname():
"""To Fetch hostname if available, else fetches IP Address.
Returns:
Hostname
"""
hostname = socket.gethostname()
if hostname:
return hostname
# Get IP Address
socket_obj = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
socket_obj.connect(("8.8.8.8", 80)) # NOSONAR
hostname = socket_obj.getsockname()[0]
socket_obj.close()
return hostname
    def get_cef_event(self, headers, extensions, data_type, subtype):
        """Produce a CEF compliant message from the arguments.

        Each extension value is first converted to its declared type, then
        sanitized and escaped; failures are logged and the field is skipped
        rather than aborting the whole event.

        Args:
            headers: Headers of CEF event (NOTE: the "Severity" entry is
                mutated in place to its mapped numeric value)
            extensions (dict): key-value pairs for event metadata
            data_type: type of data being transformed (alert/event)
            subtype: subtype of data being transformed
        Returns:
            The assembled CEF event string, components joined by self.delimiter
        """
        extension_strs = {}
        for name, value in extensions.items():
            # First convert the incoming value from Netskope to appropriate data type
            try:
                value = self.extension_converters[name].converter(value, name)
            except KeyError:
                # Unknown field name: skip it but keep processing the rest.
                self.logger.error(
                    '[{}][{}]: An error occurred while generating CEF data for field: "{}". Could not '
                    'find the field in the "valid_extensions". Field will be ignored'.format(
                        data_type, subtype, name
                    )
                )
                continue
            except Exception as err:
                self.logger.error(
                    '[{}][{}]: An error occurred while generating CEF data for field: "{}". Error: {}. '
                    "Field will be ignored".format(
                        data_type, subtype, name, str(err)
                    )
                )
                continue
            # Validate and sanitise (if required) the incoming value from Netskope before mapping it CEF
            try:
                extension_strs[
                    self.valid_extensions[name].key_name
                ] = self._equals_escaper(
                    self.valid_extensions[name].sanitizer(value, name)
                )
            except KeyError:
                self.logger.error(
                    '[{}][{}]: An error occurred while generating CEF data for field: "{}". Could not '
                    'find the field in the "valid_extensions". Field will be ignored'.format(
                        data_type, subtype, name
                    )
                )
            except Exception as err:
                self.logger.error(
                    '[{}][{}]: An error occurred while generating CEF data for field: "{}". Error: {}. '
                    "Field will be ignored".format(
                        data_type, subtype, name, str(err)
                    )
                )
        # Sorted key=value pairs give a deterministic extension ordering.
        extensions_str = " ".join(
            sorted("{}={}".format(k, v) for k, v in extension_strs.items())
        )
        possible_headers = [
            "Device Vendor",
            "Device Product",
            "Device Version",
            "Device Event Class ID",
            "Name",
            "Severity",
        ]
        self.log_invalid_header(possible_headers, headers, data_type, subtype)
        hostname = self._get_hostname()
        # First component: syslog-style local timestamp, host, and CEF version.
        cef_components = [
            "{} {} CEF:{}".format(
                time.strftime("%b %d %H:%M:%S", time.localtime(time.time())),
                hostname,
                self.cef_version,
            )
        ]
        # Append other headers if available
        for header in possible_headers:
            if header in headers:
                try:
                    if header == "Severity":
                        # Map the textual severity to its numeric CEF level;
                        # note this overwrites the caller's dict entry.
                        headers[header] = SEVERITY_MAP.get(
                            str(headers[header]).lower(), SEVERITY_UNKNOWN
                        )
                    cef_components.append(
                        self.get_header_value(header, headers)
                    )
                except Exception as err:
                    self.logger.error(
                        '[{}][{}]: An error occurred while generating CEF data for header field: "{}". Error: {}. '
                        "Field will be ignored".format(
                            data_type, subtype, header, str(err)
                        )
                    )
        # Append extension string
        cef_components.append(extensions_str)
        # Join every CEF component with given delimiter
        return self.delimiter.join(cef_components)
| 34.733108
| 117
| 0.543041
|
794cacf3311f5ae1b1b746979f78208786b62105
| 4,428
|
py
|
Python
|
src/OTLMOW/PostenMapping/Model/Post060340210.py
|
davidvlaminck/OTLClassPython
|
71330afeb37c3ea6d9981f521ff8f4a3f8b946fc
|
[
"MIT"
] | 2
|
2022-02-01T08:58:11.000Z
|
2022-02-08T13:35:17.000Z
|
src/OTLMOW/PostenMapping/Model/Post060340210.py
|
davidvlaminck/OTLMOW
|
71330afeb37c3ea6d9981f521ff8f4a3f8b946fc
|
[
"MIT"
] | null | null | null |
src/OTLMOW/PostenMapping/Model/Post060340210.py
|
davidvlaminck/OTLMOW
|
71330afeb37c3ea6d9981f521ff8f4a3f8b946fc
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from OTLMOW.PostenMapping.StandaardPost import StandaardPost
from OTLMOW.PostenMapping.StandaardPostMapping import StandaardPostMapping
# Generated with PostenCreator. To modify: extend, do not edit
class Post060340210(StandaardPost):
    """Standard tender item 0603.40210: porous water-permeable concrete paving
    stones, coloured with inorganic pigments, 220 x 110 mm, 100 mm thick,
    measured in square metres (M2).

    Each StandaardPostMapping below pins one OTL attribute of the
    WaterdoorlatendeBestrating type to the value implied by this item number.
    """

    def __init__(self):
        # All mapping values are fixed by the generator; only 'oppervlakte'
        # (surface area) is a measurable attribute to be filled in.
        super().__init__(
            nummer='0603.40210',
            beschrijving='Poreuze waterdoorlatende betonstraatstenen, gekleurd met anorganische pigmenten volgens 6-3.5, 220 x 110, 100 mm',
            meetstaateenheid='M2',
            mappings=[StandaardPostMapping(
                typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#WaterdoorlatendeBestrating',
                attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Laag.laagRol',
                dotnotatie='laagRol',
                defaultWaarde='straatlaag',
                range='',
                usagenote='',
                isMeetstaatAttr=0,
                isAltijdInTeVullen=0,
                isBasisMapping=1,
                mappingStatus='gemapt 2.0',
                mappingOpmerking='',
                standaardpostnummer='0603.40210')
                , StandaardPostMapping(
                typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#WaterdoorlatendeBestrating',
                attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#WaterdoorlatendeBestrating.type',
                dotnotatie='type',
                defaultWaarde='gekleurde-met-anorganische-pigmenten',
                range='',
                usagenote='',
                isMeetstaatAttr=0,
                isAltijdInTeVullen=0,
                isBasisMapping=1,
                mappingStatus='gemapt 2.0',
                mappingOpmerking='',
                standaardpostnummer='0603.40210')
                , StandaardPostMapping(
                typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#WaterdoorlatendeBestrating',
                attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#WaterdoorlatendeBestrating.afwerking',
                dotnotatie='afwerking',
                defaultWaarde='poreus',
                range='',
                usagenote='',
                isMeetstaatAttr=0,
                isAltijdInTeVullen=0,
                isBasisMapping=1,
                mappingStatus='gemapt 2.0',
                mappingOpmerking='',
                standaardpostnummer='0603.40210')
                , StandaardPostMapping(
                typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#WaterdoorlatendeBestrating',
                attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#WaterdoorlatendeBestrating.afmetingVanBestratingselementLxB',
                dotnotatie='afmetingVanBestratingselementLxB',
                defaultWaarde='220-x-110',
                range='',
                usagenote='',
                isMeetstaatAttr=0,
                isAltijdInTeVullen=0,
                isBasisMapping=1,
                mappingStatus='gemapt 2.0',
                mappingOpmerking='',
                standaardpostnummer='0603.40210')
                , StandaardPostMapping(
                typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#WaterdoorlatendeBestrating',
                attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#LaagDikte.dikte',
                dotnotatie='dikte',
                defaultWaarde='10',
                range='',
                usagenote='cm^^cdt:ucumunit',
                isMeetstaatAttr=0,
                isAltijdInTeVullen=0,
                isBasisMapping=1,
                mappingStatus='gemapt 2.0',
                mappingOpmerking='',
                standaardpostnummer='0603.40210')
                , StandaardPostMapping(
                typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#WaterdoorlatendeBestrating',
                attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Laag.oppervlakte',
                dotnotatie='oppervlakte',
                defaultWaarde='',
                range='',
                usagenote='m2^^cdt:ucumunit',
                isMeetstaatAttr=1,
                isAltijdInTeVullen=1,
                isBasisMapping=1,
                mappingStatus='gemapt 2.0',
                mappingOpmerking='',
                standaardpostnummer='0603.40210')])
| 48.659341
| 146
| 0.578365
|
794cad9bfad07bff052d98531a7bed12f0eadd7e
| 6,841
|
py
|
Python
|
tracer.py
|
nerox8664/pytracer
|
10ef0d2a309b6b2840b2d43ec7cb0f742578ee4e
|
[
"MIT"
] | null | null | null |
tracer.py
|
nerox8664/pytracer
|
10ef0d2a309b6b2840b2d43ec7cb0f742578ee4e
|
[
"MIT"
] | null | null | null |
tracer.py
|
nerox8664/pytracer
|
10ef0d2a309b6b2840b2d43ec7cb0f742578ee4e
|
[
"MIT"
] | null | null | null |
#!/bin/python
import numpy as np
import matplotlib.pyplot as plt
import multiprocessing as mp
from functools import partial
import argparse
from plane import Plane
from sphere import Sphere
from common_utils import *
# Defines
depth_max = 3          # NOTE(review): appears unused here; recursion uses params.max_depth
light_depth_max = 3    # cap on trace_ray depth parameter
shadow_steps = 8       # number of stochastic shadow samples per hit
h = 768                # default image height (overridden by CLI in main)
w = 1024               # default image width
r = float(w) / h       # aspect ratio used to size the screen rectangle
# Light
L = np.array([0, 0.9 ,0.0])   # point light position
color_light = np.zeros(3)     # specular light colour (black disables highlights)
ambient = 0.3                 # ambient term added to every shaded point
diffuse_c = 0.3               # default diffuse coefficient
specular_c = 0.2              # default specular coefficient
specular_k = 30               # Blinn-Phong specular exponent
# Camera
O = np.array([0., 0.0, -3.])# Camera. O = np.array([0., 0.35, -1.])# Camera.
Q = np.array([-1., 0.0, -1.0]) # Camera pointing to.
# Screen coordinates: x0, y0, x1, y1.
screen = (-1.0, -1.0 / r , 1.0, 1.0 / r )
# Scene
scene = []   # populated by create_scene(); read by trace_ray
def trace_ray(rayO, rayD, depth=0):
    """Intersect one ray with the scene and shade the nearest hit point.

    Returns a tuple (object, hit point M, normal N, colour); on a miss or
    when depth exceeds light_depth_max, (None, zeros, zeros, zeros).
    NOTE(review): depth is never incremented by any visible caller, so the
    depth guard appears to be dead in practice — confirm.
    """
    if depth > light_depth_max:
        return None, np.zeros(3), np.zeros(3), np.zeros(3)
    t = np.inf
    obj_idx = -1
    N = []
    # Find the closest intersection among all scene objects.
    for i, obj in enumerate(scene):
        is_intersect, t_obj, norm = obj.hit(rayO, rayD)
        if is_intersect and t_obj <= t:
            # NOTE(review): the middle 'obj' assignment is a no-op
            # (obj = obj); the winning object is recovered via obj_idx below.
            t, obj, N = t_obj, obj, norm
            obj_idx = i
    if t == np.inf or obj_idx < 0:
        return None, np.zeros(3), np.zeros(3), np.zeros(3)
    obj = scene[obj_idx]
    M = rayO + rayD * t        # hit point
    dL = np.linalg.norm(L - M) # distance from hit point to the light
    color = np.array(obj.color)
    toL = normalize(L - M)     # direction towards the light
    toO = normalize(O - M)     # direction towards the camera
    # Soft shadow estimate: jitter shadow_steps rays towards the light and
    # count how many are blocked by another object closer than the light.
    num = shadow_steps
    mult = num
    for k in range(num):
        for i, s_obj in enumerate(scene):
            if i == obj_idx:
                continue
            rayOl = M + N * 0.0001 + np.random.uniform(0, 1e-6, 3)
            rayDl = toL
            is_intersect, t_obj, norm = s_obj.hit(rayOl, rayDl)
            if is_intersect and t_obj < np.inf:
                ML = rayOl + rayDl * t_obj
                if np.linalg.norm(M - ML) <= dL:
                    # NOTE(review): mult can be decremented once per occluder
                    # per sample, so it may underflow num — possibly a 'break'
                    # was intended; confirm before changing.
                    mult -= 1
                    continue
    # Color
    col_ray = ambient
    # Lambert shading
    col_ray += obj.diffuse_c * max(np.dot(N, toL), 0) * color
    # Blinn-Phong shading
    col_ray += obj.specular_c * max(np.dot(N, normalize(toL + toO)), 0) ** specular_k * color_light
    return obj, M, N, col_ray * (mult / float(num))
def create_scene():
    """Populate the module-level scene with the five room walls and four spheres."""
    walls = [
        Plane([-1.5, 0.0, 0.0], [1.0, 0.0, 0.0], color=[1.0, 0.0, 0.0], reflection=0.1),   # left
        Plane([ 1.5, 0.0, 0.0], [-1.0, 0.0, 0.0], color=[1.0, 0.0, 0.0], reflection=0.1),  # right
        Plane([ 0.0, -1.0, 0.0], [0.0, 1.0, 0.0], color=[0.0, 0.0, 1.0], reflection=0.05), # floor
        Plane([ 0.0, 1.0, 0.0], [0.0, -1.0, 0.0], color=[0.0, 0.0, 1.0], reflection=0.05), # ceiling
        Plane([ 0.0, 0.0, 2.5], [0.0, 0.0, -1.0], color=[0.0, 1.0, 0.0], reflection=0.1),  # far wall
    ]
    spheres = [
        Sphere([0.0, -0.6, 0.3], r=0.2, color=[1.0, 0.1, 0.1], transparency=0.3, reflection=0.3),
        Sphere([-0.7, -0.5, 0.5], r=0.3, color=[0.1, 1.0, 0.1], transparency=0.2, reflection=0.4),
        Sphere([-0.4, 0.3, 1.2], r=0.2, color=[0.1, 1.0, 0.1], transparency=0.2, reflection=0.4),
        Sphere([0.5, -0.5, 1.5], r=0.3, color=[0.1, 1.0, 0.1], transparency=0.2, reflection=0.4),
    ]
    scene.extend(walls + spheres)
def refract(v, n, q):
    """Return the (approximate) refracted direction of *v* about normal *n*.

    *q* is the refractive-index ratio; internally the formula uses 2 - q.
    """
    ratio = 2.0 - q
    cos_incident = np.dot(n, v)
    return v * ratio - n * (-cos_incident + ratio * cos_incident)
def ray_trace(params, rayO, rayD, reflection, refraction, depth, n1 = 1.0):
    """Recursively trace one ray, combining reflected and refracted colour.

    Args:
        params: parsed CLI arguments; only max_depth is read here
        rayO, rayD: ray origin and (normalized) direction
        reflection, refraction: accumulated attenuation factors so far
        depth: current recursion depth
        n1: refractive index of the medium the ray is travelling in
    Returns:
        RGB colour as a numpy array (zeros on a miss).
    """
    obj, M, N, col_ray = trace_ray(rayO, rayD)
    if not obj:
        return np.zeros(3)
    n = obj.n
    transparency = obj.transparency
    # Recursion cut-off: return only the locally shaded colour.
    if depth > params.max_depth:
        return transparency * col_ray
    # Offset the child-ray origins slightly off the surface to avoid
    # self-intersection (reflection above, refraction below the surface).
    rayOrefl = M + N * 0.0001
    rayOrefr = M - N * 0.00001
    rayDrefl = normalize(rayD - 2 * np.dot(rayD, N) * N)
    rayDrefr = refract(rayD, N, n1 / n)
    # Attenuate contributions by the object's own coefficients.
    refr = refraction * obj.refraction
    refl = reflection * obj.reflection
    if refl > epsilon:
        refl_color = refl * ray_trace(params, rayOrefl, rayDrefl, refl, refr, depth + 1, n)
    else:
        refl_color = 0.0
    if refr > epsilon:
        refr_color = refr * ray_trace(params, rayOrefr, rayDrefr, refl, refr, depth + 1, n)
    else:
        refr_color = 0.0
    return refr_color + refl_color + obj.transparency * col_ray
def ray_trace_worker(inp, params):
    """
    A worker instance: trace the ray for one pixel.

    :param inp: (column index, screen x, row index, screen y)
    :param params: parsed CLI arguments (camera position, recursion depth, ...)
    :return: Clipped RGB color for the current pixel
    """
    i, x, j, y = inp
    # Aim the (module-level) screen-plane target at this pixel.
    Q[:2] = (x, y)
    # Bug fix: the ray must originate at the configured camera position
    # (params.O). Previously the module-level O was used as the origin while
    # the direction already used params.O, so the --O option only half-applied.
    origin = np.asarray(params.O, dtype=float)
    D = normalize(Q - origin)
    return np.clip(ray_trace(params, origin, D, 1, 1, 1), 0, 1)
def main():
    """
    Application entry point.

    Parses command line arguments, builds the scene and renders the image
    column-by-column using a pool of worker processes.
    """
    parser = argparse.ArgumentParser(description='Python ray tracer')
    parser.add_argument(
        '--workers', type=int, default=4,
        help='Number of ray tracing workers'
    )
    parser.add_argument(
        '--max_depth', type=int, default=3,
        help='Recursion depth'
    )
    parser.add_argument(
        '--height', type=int, default=64,
        help='An image height'
    )
    parser.add_argument(
        '--width', type=int, default=128,
        help='An image width'
    )
    parser.add_argument(
        '--image', type=str, default='output.png',
        help='A destination image'
    )
    parser.add_argument(
        '--show-incomplete', dest='show_incomplete', action='store_true', default=False,
        help='Render intermediate results to the image'
    )
    parser.add_argument(
        '--O', type=float, nargs='+', default=[-0.2, 0.0, -3.4],
        help='Camera position'
    )
    # Parse command line arguments
    params = parser.parse_args()
    # Create the scene
    create_scene()
    # Create empty buffer image (rows x columns x RGB)
    img = np.zeros((params.height, params.width, 3))
    # Use the pool as a context manager so worker processes are always
    # reclaimed, even when rendering raises part-way (previously the pool
    # was never closed).
    with mp.Pool(processes=params.workers) as pool:
        # Parallel by the image columns
        for i, x in enumerate(np.linspace(screen[0], screen[2], params.width)):
            if i % 5 == 0:
                print(i / float(params.width) * 100, "%")
            if params.show_incomplete:
                plt.imsave(params.image, img)
            # Create pool parameters: one task per pixel of this column
            inputs = [(i, x, j, y) for j, y in enumerate(np.linspace(screen[1], screen[3], params.height))]
            # Parallel evaluation
            row = pool.map(partial(ray_trace_worker, params=params), inputs)
            img[:, i] = np.flip(row, axis=0)
    # Save results
    plt.imsave(params.image, img)


if __name__ == '__main__':
    main()
| 28.036885
| 115
| 0.568923
|
794cadc165213c4ea0015854183911ab3746c9fe
| 598
|
py
|
Python
|
diffpy/__init__.py
|
yevgenyr/diffpy.srxplanar
|
67d4dae2d9c2267e1c4d18a9901525cc35009530
|
[
"BSD-3-Clause"
] | 1
|
2015-12-12T20:54:14.000Z
|
2015-12-12T20:54:14.000Z
|
diffpy/__init__.py
|
yevgenyr/diffpy.srxplanar
|
67d4dae2d9c2267e1c4d18a9901525cc35009530
|
[
"BSD-3-Clause"
] | 1
|
2021-06-15T21:54:44.000Z
|
2021-06-16T00:32:03.000Z
|
diffpy/__init__.py
|
yevgenyr/diffpy.srxplanar
|
67d4dae2d9c2267e1c4d18a9901525cc35009530
|
[
"BSD-3-Clause"
] | 4
|
2015-09-23T17:47:15.000Z
|
2022-03-24T22:37:23.000Z
|
#!/usr/bin/env python
##############################################################################
#
# diffpy.srxplanar by DANSE Diffraction group
# Simon J. L. Billinge
# (c) 2010 Trustees of the Columbia University
# in the City of New York. All rights reserved.
#
# File coded by: Xiaohao Yang
#
# See AUTHORS.txt for a list of people who contributed.
# See LICENSE.txt for license information.
#
##############################################################################
# Declare 'diffpy' as a setuptools namespace package so separately installed
# distributions can contribute subpackages under the same top-level name.
__import__('pkg_resources').declare_namespace(__name__)
| 33.222222
| 78
| 0.478261
|
794cae43050eaf2043dc0a8a22f58ffcf9f67772
| 10,918
|
py
|
Python
|
wagtail/admin/widgets.py
|
patta42/wagtail
|
419c8d3a03e6ea7fe2aa01922592e5be8d0d73e1
|
[
"BSD-3-Clause"
] | 1
|
2018-08-15T18:27:09.000Z
|
2018-08-15T18:27:09.000Z
|
wagtail/admin/widgets.py
|
patta42/wagtail
|
419c8d3a03e6ea7fe2aa01922592e5be8d0d73e1
|
[
"BSD-3-Clause"
] | 13
|
2019-03-25T19:57:35.000Z
|
2019-12-28T19:25:23.000Z
|
wagtail/admin/widgets.py
|
patta42/wagtail
|
419c8d3a03e6ea7fe2aa01922592e5be8d0d73e1
|
[
"BSD-3-Clause"
] | null | null | null |
import itertools
import json
from functools import total_ordering
from django.conf import settings
from django.forms import widgets
from django.forms.utils import flatatt
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.formats import get_format
from django.utils.functional import cached_property
from django.utils.html import format_html
from django.utils.translation import ugettext_lazy as _
from taggit.forms import TagWidget
from wagtail.admin.datetimepicker import to_datetimepicker_format
from wagtail.core import hooks
from wagtail.core.models import Page
from wagtail.utils.widgets import WidgetWithScript
# Fallback strftime formats used when the corresponding WAGTAIL_DATE_FORMAT /
# WAGTAIL_DATETIME_FORMAT settings are absent.
DEFAULT_DATE_FORMAT = '%Y-%m-%d'
DEFAULT_DATETIME_FORMAT = '%Y-%m-%d %H:%M'
class AdminAutoHeightTextInput(widgets.Textarea):
    """Textarea that grows to fit its content via client-side JS."""
    template_name = 'wagtailadmin/widgets/auto_height_text_input.html'

    def __init__(self, attrs=None):
        # A single row is enough as a starting point: the auto-height
        # behaviour resizes the control anyway.
        merged_attrs = {'rows': '1'}
        merged_attrs.update(attrs or {})
        super().__init__(merged_attrs)
class AdminDateInput(widgets.DateInput):
    """Date input wired up to the admin datetimepicker widget."""
    template_name = 'wagtailadmin/widgets/date_input.html'

    def __init__(self, attrs=None, format=None):
        merged_attrs = {'autocomplete': 'new-date'}
        merged_attrs.update(attrs or {})
        chosen_format = format
        if chosen_format is None:
            chosen_format = getattr(settings, 'WAGTAIL_DATE_FORMAT', DEFAULT_DATE_FORMAT)
        # Keep an equivalent format string for the JS picker.
        self.js_format = to_datetimepicker_format(chosen_format)
        super().__init__(attrs=merged_attrs, format=chosen_format)

    def get_context(self, name, value, attrs):
        context = super().get_context(name, value, attrs)
        context['widget']['config_json'] = json.dumps({
            'dayOfWeekStart': get_format('FIRST_DAY_OF_WEEK'),
            'format': self.js_format,
        })
        return context
class AdminTimeInput(widgets.TimeInput):
    """Time input with an autocomplete hint suppressing browser suggestions."""
    template_name = 'wagtailadmin/widgets/time_input.html'

    def __init__(self, attrs=None, format='%H:%M'):
        merged_attrs = {'autocomplete': 'new-time'}
        merged_attrs.update(attrs or {})
        super().__init__(attrs=merged_attrs, format=format)
class AdminDateTimeInput(widgets.DateTimeInput):
    """Datetime input wired up to the admin datetimepicker widget."""
    template_name = 'wagtailadmin/widgets/datetime_input.html'

    def __init__(self, attrs=None, format=None):
        merged_attrs = {'autocomplete': 'new-date-time'}
        merged_attrs.update(attrs or {})
        chosen_format = format
        if chosen_format is None:
            chosen_format = getattr(settings, 'WAGTAIL_DATETIME_FORMAT', DEFAULT_DATETIME_FORMAT)
        # Keep an equivalent format string for the JS picker.
        self.js_format = to_datetimepicker_format(chosen_format)
        super().__init__(attrs=merged_attrs, format=chosen_format)

    def get_context(self, name, value, attrs):
        context = super().get_context(name, value, attrs)
        context['widget']['config_json'] = json.dumps({
            'dayOfWeekStart': get_format('FIRST_DAY_OF_WEEK'),
            'format': self.js_format,
        })
        return context
class AdminTagWidget(TagWidget):
    """Tag input backed by the admin tag autocomplete endpoint."""
    template_name = 'wagtailadmin/widgets/tag_widget.html'

    def get_context(self, name, value, attrs):
        context = super().get_context(name, value, attrs)
        widget_ctx = context['widget']
        widget_ctx['autocomplete_url'] = reverse('wagtailadmin_tag_autocomplete')
        widget_ctx['tag_spaces_allowed'] = getattr(settings, 'TAG_SPACES_ALLOWED', True)
        widget_ctx['tag_limit'] = getattr(settings, 'TAG_LIMIT', None)
        return context
class AdminChooser(WidgetWithScript, widgets.Input):
    """Base widget for modal 'chooser' fields backed by a hidden input."""
    input_type = 'hidden'
    choose_one_text = _("Choose an item")
    choose_another_text = _("Choose another item")
    clear_choice_text = _("Clear choice")
    link_to_chosen_text = _("Edit this item")
    show_edit_link = True

    # when looping over form fields, this one should appear in visible_fields, not hidden_fields
    # despite the underlying input being type="hidden"
    is_hidden = False

    def get_instance(self, model_class, value):
        """Resolve *value* (a pk or None) to a model instance, or None."""
        if value is None:
            return None
        try:
            return model_class.objects.get(pk=value)
        except model_class.DoesNotExist:
            return None

    def get_instance_and_id(self, model_class, value):
        """Return an (instance, pk) pair for a pk, an instance, or None."""
        if value is None:
            return (None, None)
        if isinstance(value, model_class):
            return (value, value.pk)
        try:
            return (model_class.objects.get(pk=value), value)
        except model_class.DoesNotExist:
            return (None, None)

    def value_from_datadict(self, data, files, name):
        # treat the empty string as None
        result = super().value_from_datadict(data, files, name)
        return None if result == '' else result

    def __init__(self, **kwargs):
        # allow the chooser texts / flags to be overridden per-instance
        for option in (
            'choose_one_text',
            'choose_another_text',
            'clear_choice_text',
            'link_to_chosen_text',
            'show_edit_link',
        ):
            if option in kwargs:
                setattr(self, option, kwargs.pop(option))
        super().__init__(**kwargs)
class AdminPageChooser(AdminChooser):
    """Chooser widget for selecting a Page, optionally restricted to specific page types."""
    choose_one_text = _('Choose a page')
    choose_another_text = _('Choose another page')
    link_to_chosen_text = _('Edit this page')

    def __init__(self, target_models=None, can_choose_root=False, user_perms=None, **kwargs):
        """
        :param target_models: iterable of Page subclasses the user may choose
            (defaults to any Page)
        :param can_choose_root: whether the tree root itself is selectable
        :param user_perms: permission policy name passed through to the JS chooser
        """
        super().__init__(**kwargs)
        if target_models:
            # Mention the page type in the prompt when exactly one concrete
            # type (other than the generic Page) is allowed.
            model_names = [model._meta.verbose_name.title() for model in target_models if model is not Page]
            if len(model_names) == 1:
                self.choose_one_text += ' (' + model_names[0] + ')'

        self.user_perms = user_perms
        self.target_models = list(target_models or [Page])
        self.can_choose_root = can_choose_root

    def _get_lowest_common_page_class(self):
        """
        Return a Page class that is an ancestor for all Page classes in
        ``target_models``, and is also a concrete Page class itself.
        """
        if len(self.target_models) == 1:
            # Shortcut for a single page type
            return self.target_models[0]
        else:
            return Page

    def render_html(self, name, value, attrs):
        # Render the hidden input plus the chooser chrome around it.
        model_class = self._get_lowest_common_page_class()
        instance, value = self.get_instance_and_id(model_class, value)
        original_field_html = super().render_html(name, value, attrs)

        return render_to_string("wagtailadmin/widgets/page_chooser.html", {
            'widget': self,
            'original_field_html': original_field_html,
            'attrs': attrs,
            'value': value,
            'page': instance,
        })

    def render_js_init(self, id_, name, value):
        """Return the JS statement that initialises the chooser for this field."""
        if isinstance(value, Page):
            page = value
        else:
            # Value is an ID look up object
            model_class = self._get_lowest_common_page_class()
            page = self.get_instance(model_class, value)
        parent = page.get_parent() if page else None

        return "createPageChooser({id}, {model_names}, {parent}, {can_choose_root}, {user_perms});".format(
            id=json.dumps(id_),
            model_names=json.dumps([
                '{app}.{model}'.format(
                    app=model._meta.app_label,
                    model=model._meta.model_name)
                for model in self.target_models
            ]),
            parent=json.dumps(parent.id if parent else None),
            can_choose_root=('true' if self.can_choose_root else 'false'),
            user_perms=json.dumps(self.user_perms),
        )

    class Media:
        js = [
            'wagtailadmin/js/page-chooser-modal.js',
            'wagtailadmin/js/page-chooser.js',
        ]
@total_ordering
class Button:
    """An admin UI action button, ordered by (priority, label)."""
    show = True

    def __init__(self, label, url, classes=None, attrs=None, priority=1000):
        """
        :param label: visible button text
        :param url: href target
        :param classes: iterable of CSS class names (copied defensively)
        :param attrs: extra HTML attributes (copied defensively)
        :param priority: sort key; lower sorts first
        """
        self.label = label
        self.url = url
        # Copy both collections. The previous mutable defaults
        # (classes=set(), attrs={}) meant the default set object was shared
        # by every instance constructed without explicit classes, so mutating
        # one button's classes leaked into all future buttons.
        self.classes = set(classes) if classes is not None else set()
        self.attrs = dict(attrs) if attrs is not None else {}
        self.priority = priority

    def render(self):
        """Render the button as an ``<a>`` element."""
        attrs = {'href': self.url, 'class': ' '.join(sorted(self.classes))}
        attrs.update(self.attrs)
        return format_html('<a{}>{}</a>', flatatt(attrs), self.label)

    def __str__(self):
        return self.render()

    def __repr__(self):
        return '<Button: {}>'.format(self.label)

    def __lt__(self, other):
        # total_ordering derives the remaining comparisons from this + __eq__.
        if not isinstance(other, Button):
            return NotImplemented
        return (self.priority, self.label) < (other.priority, other.label)

    def __eq__(self, other):
        if not isinstance(other, Button):
            return NotImplemented
        return (self.label == other.label
                and self.url == other.url
                and self.classes == other.classes
                and self.attrs == other.attrs
                and self.priority == other.priority)
class PageListingButton(Button):
    """Button styled for the page listing (small secondary button)."""

    def __init__(self, label, url, classes=(), **kwargs):
        # Immutable default () replaces the mutable-default pitfall
        # (classes=set()); the base style classes are always added on top of
        # whatever the caller supplies.
        classes = {'button', 'button-small', 'button-secondary'} | set(classes)
        super().__init__(label, url, classes=classes, **kwargs)
class BaseDropdownMenuButton(Button):
    """Button rendered as a dropdown; subclasses supply the menu items."""

    def __init__(self, *args, **kwargs):
        # A dropdown trigger never navigates anywhere itself.
        super().__init__(*args, url=None, **kwargs)

    @cached_property
    def dropdown_buttons(self):
        # Subclasses must provide the buttons shown inside the dropdown.
        raise NotImplementedError

    def render(self):
        context = {
            'buttons': self.dropdown_buttons,
            'label': self.label,
            'title': self.attrs.get('title'),
            'is_parent': self.is_parent,
        }
        return render_to_string(self.template_name, context)
class ButtonWithDropdownFromHook(BaseDropdownMenuButton):
    """Dropdown button whose items are collected from a registered hook."""
    template_name = 'wagtailadmin/pages/listing/_button_with_dropdown.html'

    def __init__(self, label, hook_name, page, page_perms, is_parent, **kwargs):
        self.hook_name = hook_name
        self.page = page
        self.page_perms = page_perms
        self.is_parent = is_parent
        super().__init__(label, **kwargs)

    @property
    def show(self):
        # Hide the dropdown entirely when no hook contributed a button.
        return bool(self.dropdown_buttons)

    @cached_property
    def dropdown_buttons(self):
        collected = []
        for hook in hooks.get_hooks(self.hook_name):
            collected.extend(hook(self.page, self.page_perms, self.is_parent))
        return sorted(collected)
| 34.333333
| 108
| 0.644349
|
794cb00b7ade71032556e8e57860682065ec919d
| 120,429
|
py
|
Python
|
pybind/slxos/v16r_1_00b/brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v16r_1_00b/brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v16r_1_00b/brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | 1
|
2021-11-05T22:15:42.000Z
|
2021-11-05T22:15:42.000Z
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import lsp_sec_path_config_admin_groups
class sec_path(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-mpls - based on the path /brocade_mpls_rpc/show-mpls-lsp-extensive/output/lsp/show-mpls-lsp-extensive-info/show-mpls-lsp-sec-path-info/sec-path. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__lsp_sec_path_path_name','__lsp_sec_path_state','__lsp_sec_path_state_up','__lsp_sec_path_active','__lsp_sec_path_is_current_secondary','__lsp_sec_path_is_selected_secondary','__lsp_sec_path_config_reoptimize_timer_configured','__lsp_sec_path_config_reoptimize_timer','__lsp_sec_path_config_tspec_mtu_configured','__lsp_sec_path_sec_path_config_tspec_mtu','__lsp_sec_path_config_cos_configured','__lsp_sec_path_config_cos','__lsp_sec_path_config_mtu_configured','__lsp_sec_path_config_mtu','__lsp_sec_path_config_tie_breaking_configured','__lsp_sec_path_config_tie_break_random','__lsp_sec_path_config_tie_break_least_fill','__lsp_sec_path_config_tie_break_most_fill','__lsp_sec_path_config_cspf_disabled','__lsp_sec_path_config_hot_standby','__lsp_sec_path_config_pinned','__lsp_sec_path_config_persistent','__lsp_sec_path_config_soft_prempt','__lsp_sec_path_config_priority_configured','__lsp_sec_path_config_setup_prority','__lsp_sec_path_config_holding_prority','__lsp_sec_path_config_hop_limit_configured','__lsp_sec_path_config_hop_limit','__lsp_sec_path_config_traffic_eng_rate_configured','__lsp_sec_path_config_traffic_eng_mean_rate','__lsp_sec_path_config_traffic_eng_max_rate','__lsp_sec_path_config_traffic_eng_max_burst','__lsp_sec_path_config_admin_group_configured','__lsp_sec_path_config_admin_groups',)
_yang_name = 'sec-path'
_rest_name = 'sec-path'
_pybind_generated_by = 'container'
  def __init__(self, *args, **kwargs):
    """
    Initialise the sec-path container.

    Keyword arguments consumed here (all others are ignored):
      path_helper -- a YANGPathHelper used for XPath registration/lookup,
                     False to disable, or None to inherit from the parent.
      extmethods  -- dict of extension methods keyed by path, False to
                     disable, or None to inherit from the parent.
      load        -- passed through to the per-leaf setters when copying
                     from an existing object (see the *args handling below).

    A single positional argument may be supplied: an object exposing the
    same pyangbind elements, whose changed values are copied into this
    instance (copy-constructor behaviour).
    """
    # Resolve the path helper: explicit value wins, then the parent's,
    # otherwise disabled.
    path_helper_ = kwargs.pop("path_helper", None)
    if path_helper_ is False:
      self._path_helper = False
    elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
      self._path_helper = path_helper_
    elif hasattr(self, "_parent"):
      path_helper_ = getattr(self._parent, "_path_helper", False)
      self._path_helper = path_helper_
    else:
      self._path_helper = False

    # Resolve extension methods with the same precedence as path_helper.
    extmethods = kwargs.pop("extmethods", None)
    if extmethods is False:
      self._extmethods = False
    elif extmethods is not None and isinstance(extmethods, dict):
      self._extmethods = extmethods
    elif hasattr(self, "_parent"):
      extmethods = getattr(self._parent, "_extmethods", None)
      self._extmethods = extmethods
    else:
      self._extmethods = False

    # Create a default (unconfigured) YANGDynClass instance for every
    # generated leaf/container child of this node.
    self.__lsp_sec_path_config_mtu_configured = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-mtu-configured", rest_name="lsp-sec-path-config-mtu-configured", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
    self.__lsp_sec_path_config_hot_standby = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-hot-standby", rest_name="lsp-sec-path-config-hot-standby", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
    self.__lsp_sec_path_config_reoptimize_timer_configured = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-reoptimize-timer-configured", rest_name="lsp-sec-path-config-reoptimize-timer-configured", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
    self.__lsp_sec_path_config_admin_groups = YANGDynClass(base=lsp_sec_path_config_admin_groups.lsp_sec_path_config_admin_groups, is_container='container', presence=False, yang_name="lsp-sec-path-config-admin-groups", rest_name="lsp-sec-path-config-admin-groups", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)
    self.__lsp_sec_path_config_cspf_disabled = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-cspf-disabled", rest_name="lsp-sec-path-config-cspf-disabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
    self.__lsp_sec_path_config_reoptimize_timer = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-sec-path-config-reoptimize-timer", rest_name="lsp-sec-path-config-reoptimize-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
    self.__lsp_sec_path_config_soft_prempt = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-soft-prempt", rest_name="lsp-sec-path-config-soft-prempt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
    self.__lsp_sec_path_config_tie_break_random = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-tie-break-random", rest_name="lsp-sec-path-config-tie-break-random", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
    self.__lsp_sec_path_config_traffic_eng_mean_rate = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-sec-path-config-traffic-eng-mean-rate", rest_name="lsp-sec-path-config-traffic-eng-mean-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
    self.__lsp_sec_path_is_current_secondary = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-is-current-secondary", rest_name="lsp-sec-path-is-current-secondary", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
    self.__lsp_sec_path_config_persistent = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-persistent", rest_name="lsp-sec-path-config-persistent", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
    self.__lsp_sec_path_config_hop_limit = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="lsp-sec-path-config-hop-limit", rest_name="lsp-sec-path-config-hop-limit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint8', is_config=True)
    self.__lsp_sec_path_is_selected_secondary = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-is-selected-secondary", rest_name="lsp-sec-path-is-selected-secondary", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
    self.__lsp_sec_path_config_traffic_eng_max_rate = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-sec-path-config-traffic-eng-max-rate", rest_name="lsp-sec-path-config-traffic-eng-max-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
    self.__lsp_sec_path_config_holding_prority = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="lsp-sec-path-config-holding-prority", rest_name="lsp-sec-path-config-holding-prority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint8', is_config=True)
    self.__lsp_sec_path_state_up = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-state-up", rest_name="lsp-sec-path-state-up", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
    self.__lsp_sec_path_config_tie_breaking_configured = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-tie-breaking-configured", rest_name="lsp-sec-path-config-tie-breaking-configured", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
    self.__lsp_sec_path_sec_path_config_tspec_mtu = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-sec-path-sec-path-config-tspec-mtu", rest_name="lsp-sec-path-sec-path-config-tspec-mtu", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
    self.__lsp_sec_path_state = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="lsp-sec-path-state", rest_name="lsp-sec-path-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint8', is_config=True)
    self.__lsp_sec_path_config_setup_prority = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="lsp-sec-path-config-setup-prority", rest_name="lsp-sec-path-config-setup-prority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint8', is_config=True)
    self.__lsp_sec_path_config_traffic_eng_max_burst = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-sec-path-config-traffic-eng-max-burst", rest_name="lsp-sec-path-config-traffic-eng-max-burst", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
    self.__lsp_sec_path_active = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-active", rest_name="lsp-sec-path-active", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
    self.__lsp_sec_path_config_tie_break_least_fill = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-tie-break-least-fill", rest_name="lsp-sec-path-config-tie-break-least-fill", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
    self.__lsp_sec_path_config_hop_limit_configured = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-hop-limit-configured", rest_name="lsp-sec-path-config-hop-limit-configured", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
    self.__lsp_sec_path_config_tspec_mtu_configured = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-tspec-mtu-configured", rest_name="lsp-sec-path-config-tspec-mtu-configured", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
    self.__lsp_sec_path_config_priority_configured = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-priority-configured", rest_name="lsp-sec-path-config-priority-configured", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
    self.__lsp_sec_path_config_traffic_eng_rate_configured = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-traffic-eng-rate-configured", rest_name="lsp-sec-path-config-traffic-eng-rate-configured", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
    self.__lsp_sec_path_config_admin_group_configured = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-admin-group-configured", rest_name="lsp-sec-path-config-admin-group-configured", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
    self.__lsp_sec_path_config_mtu = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-sec-path-config-mtu", rest_name="lsp-sec-path-config-mtu", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
    self.__lsp_sec_path_config_pinned = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-pinned", rest_name="lsp-sec-path-config-pinned", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
    self.__lsp_sec_path_config_cos_configured = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-cos-configured", rest_name="lsp-sec-path-config-cos-configured", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
    self.__lsp_sec_path_config_tie_break_most_fill = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-tie-break-most-fill", rest_name="lsp-sec-path-config-tie-break-most-fill", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
    self.__lsp_sec_path_config_cos = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="lsp-sec-path-config-cos", rest_name="lsp-sec-path-config-cos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint8', is_config=True)
    self.__lsp_sec_path_path_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="lsp-sec-path-path-name", rest_name="lsp-sec-path-path-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='string', is_config=True)

    # Copy-constructor: if a template object was supplied, verify it has
    # every pyangbind element, then copy each element that has changed
    # from its default, forwarding the 'load' flag to the setters.
    load = kwargs.pop("load", None)
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'brocade_mpls_rpc', u'show-mpls-lsp-extensive', u'output', u'lsp', u'show-mpls-lsp-extensive-info', u'show-mpls-lsp-sec-path-info', u'sec-path']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'show-mpls-lsp-extensive', u'output', u'lsp', u'sec-path']
def _get_lsp_sec_path_path_name(self):
"""
Getter method for lsp_sec_path_path_name, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_path_name (string)
YANG Description: Secondary path name
"""
return self.__lsp_sec_path_path_name
def _set_lsp_sec_path_path_name(self, v, load=False):
"""
Setter method for lsp_sec_path_path_name, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_path_name (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_sec_path_path_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_sec_path_path_name() directly.
YANG Description: Secondary path name
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="lsp-sec-path-path-name", rest_name="lsp-sec-path-path-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_sec_path_path_name must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="lsp-sec-path-path-name", rest_name="lsp-sec-path-path-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='string', is_config=True)""",
})
self.__lsp_sec_path_path_name = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_sec_path_path_name(self):
self.__lsp_sec_path_path_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="lsp-sec-path-path-name", rest_name="lsp-sec-path-path-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='string', is_config=True)
def _get_lsp_sec_path_state(self):
"""
Getter method for lsp_sec_path_state, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_state (uint8)
YANG Description: Secondary path state
"""
return self.__lsp_sec_path_state
def _set_lsp_sec_path_state(self, v, load=False):
"""
Setter method for lsp_sec_path_state, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_state (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_sec_path_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_sec_path_state() directly.
YANG Description: Secondary path state
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="lsp-sec-path-state", rest_name="lsp-sec-path-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint8', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_sec_path_state must be of a type compatible with uint8""",
'defined-type': "uint8",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="lsp-sec-path-state", rest_name="lsp-sec-path-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint8', is_config=True)""",
})
self.__lsp_sec_path_state = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_sec_path_state(self):
self.__lsp_sec_path_state = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="lsp-sec-path-state", rest_name="lsp-sec-path-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint8', is_config=True)
def _get_lsp_sec_path_state_up(self):
"""
Getter method for lsp_sec_path_state_up, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_state_up (boolean)
YANG Description: Secondary path state
"""
return self.__lsp_sec_path_state_up
def _set_lsp_sec_path_state_up(self, v, load=False):
"""
Setter method for lsp_sec_path_state_up, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_state_up (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_sec_path_state_up is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_sec_path_state_up() directly.
YANG Description: Secondary path state
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-state-up", rest_name="lsp-sec-path-state-up", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_sec_path_state_up must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-state-up", rest_name="lsp-sec-path-state-up", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)""",
})
self.__lsp_sec_path_state_up = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_sec_path_state_up(self):
self.__lsp_sec_path_state_up = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-state-up", rest_name="lsp-sec-path-state-up", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
def _get_lsp_sec_path_active(self):
"""
Getter method for lsp_sec_path_active, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_active (boolean)
YANG Description: Secondary path state atcive
"""
return self.__lsp_sec_path_active
def _set_lsp_sec_path_active(self, v, load=False):
"""
Setter method for lsp_sec_path_active, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_active (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_sec_path_active is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_sec_path_active() directly.
YANG Description: Secondary path state atcive
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-active", rest_name="lsp-sec-path-active", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_sec_path_active must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-active", rest_name="lsp-sec-path-active", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)""",
})
self.__lsp_sec_path_active = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_sec_path_active(self):
self.__lsp_sec_path_active = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-active", rest_name="lsp-sec-path-active", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
def _get_lsp_sec_path_is_current_secondary(self):
"""
Getter method for lsp_sec_path_is_current_secondary, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_is_current_secondary (boolean)
YANG Description: Secondary path current secondary
"""
return self.__lsp_sec_path_is_current_secondary
def _set_lsp_sec_path_is_current_secondary(self, v, load=False):
"""
Setter method for lsp_sec_path_is_current_secondary, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_is_current_secondary (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_sec_path_is_current_secondary is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_sec_path_is_current_secondary() directly.
YANG Description: Secondary path current secondary
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-is-current-secondary", rest_name="lsp-sec-path-is-current-secondary", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_sec_path_is_current_secondary must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-is-current-secondary", rest_name="lsp-sec-path-is-current-secondary", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)""",
})
self.__lsp_sec_path_is_current_secondary = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_sec_path_is_current_secondary(self):
self.__lsp_sec_path_is_current_secondary = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-is-current-secondary", rest_name="lsp-sec-path-is-current-secondary", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
def _get_lsp_sec_path_is_selected_secondary(self):
"""
Getter method for lsp_sec_path_is_selected_secondary, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_is_selected_secondary (boolean)
YANG Description: Secondary path decondary secondary
"""
return self.__lsp_sec_path_is_selected_secondary
def _set_lsp_sec_path_is_selected_secondary(self, v, load=False):
"""
Setter method for lsp_sec_path_is_selected_secondary, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_is_selected_secondary (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_sec_path_is_selected_secondary is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_sec_path_is_selected_secondary() directly.
YANG Description: Secondary path decondary secondary
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-is-selected-secondary", rest_name="lsp-sec-path-is-selected-secondary", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_sec_path_is_selected_secondary must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-is-selected-secondary", rest_name="lsp-sec-path-is-selected-secondary", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)""",
})
self.__lsp_sec_path_is_selected_secondary = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_sec_path_is_selected_secondary(self):
self.__lsp_sec_path_is_selected_secondary = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-is-selected-secondary", rest_name="lsp-sec-path-is-selected-secondary", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
def _get_lsp_sec_path_config_reoptimize_timer_configured(self):
"""
Getter method for lsp_sec_path_config_reoptimize_timer_configured, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_reoptimize_timer_configured (boolean)
YANG Description: LSP reoptimization timer configured
"""
return self.__lsp_sec_path_config_reoptimize_timer_configured
def _set_lsp_sec_path_config_reoptimize_timer_configured(self, v, load=False):
"""
Setter method for lsp_sec_path_config_reoptimize_timer_configured, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_reoptimize_timer_configured (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_sec_path_config_reoptimize_timer_configured is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_sec_path_config_reoptimize_timer_configured() directly.
YANG Description: LSP reoptimization timer configured
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-reoptimize-timer-configured", rest_name="lsp-sec-path-config-reoptimize-timer-configured", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_sec_path_config_reoptimize_timer_configured must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-reoptimize-timer-configured", rest_name="lsp-sec-path-config-reoptimize-timer-configured", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)""",
})
self.__lsp_sec_path_config_reoptimize_timer_configured = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_sec_path_config_reoptimize_timer_configured(self):
self.__lsp_sec_path_config_reoptimize_timer_configured = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-reoptimize-timer-configured", rest_name="lsp-sec-path-config-reoptimize-timer-configured", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
def _get_lsp_sec_path_config_reoptimize_timer(self):
"""
Getter method for lsp_sec_path_config_reoptimize_timer, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_reoptimize_timer (uint32)
YANG Description: LSP reoptimization timer value
"""
return self.__lsp_sec_path_config_reoptimize_timer
def _set_lsp_sec_path_config_reoptimize_timer(self, v, load=False):
"""
Setter method for lsp_sec_path_config_reoptimize_timer, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_reoptimize_timer (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_sec_path_config_reoptimize_timer is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_sec_path_config_reoptimize_timer() directly.
YANG Description: LSP reoptimization timer value
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-sec-path-config-reoptimize-timer", rest_name="lsp-sec-path-config-reoptimize-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_sec_path_config_reoptimize_timer must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-sec-path-config-reoptimize-timer", rest_name="lsp-sec-path-config-reoptimize-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)""",
})
self.__lsp_sec_path_config_reoptimize_timer = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_sec_path_config_reoptimize_timer(self):
self.__lsp_sec_path_config_reoptimize_timer = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-sec-path-config-reoptimize-timer", rest_name="lsp-sec-path-config-reoptimize-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
def _get_lsp_sec_path_config_tspec_mtu_configured(self):
"""
Getter method for lsp_sec_path_config_tspec_mtu_configured, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_tspec_mtu_configured (boolean)
YANG Description: LSP traffic spec mtu configured
"""
return self.__lsp_sec_path_config_tspec_mtu_configured
def _set_lsp_sec_path_config_tspec_mtu_configured(self, v, load=False):
"""
Setter method for lsp_sec_path_config_tspec_mtu_configured, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_tspec_mtu_configured (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_sec_path_config_tspec_mtu_configured is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_sec_path_config_tspec_mtu_configured() directly.
YANG Description: LSP traffic spec mtu configured
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-tspec-mtu-configured", rest_name="lsp-sec-path-config-tspec-mtu-configured", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_sec_path_config_tspec_mtu_configured must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-tspec-mtu-configured", rest_name="lsp-sec-path-config-tspec-mtu-configured", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)""",
})
self.__lsp_sec_path_config_tspec_mtu_configured = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_sec_path_config_tspec_mtu_configured(self):
self.__lsp_sec_path_config_tspec_mtu_configured = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-tspec-mtu-configured", rest_name="lsp-sec-path-config-tspec-mtu-configured", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
def _get_lsp_sec_path_sec_path_config_tspec_mtu(self):
"""
Getter method for lsp_sec_path_sec_path_config_tspec_mtu, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_sec_path_config_tspec_mtu (uint32)
YANG Description: LSP traffic spec mtu value
"""
return self.__lsp_sec_path_sec_path_config_tspec_mtu
def _set_lsp_sec_path_sec_path_config_tspec_mtu(self, v, load=False):
"""
Setter method for lsp_sec_path_sec_path_config_tspec_mtu, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_sec_path_config_tspec_mtu (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_sec_path_sec_path_config_tspec_mtu is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_sec_path_sec_path_config_tspec_mtu() directly.
YANG Description: LSP traffic spec mtu value
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-sec-path-sec-path-config-tspec-mtu", rest_name="lsp-sec-path-sec-path-config-tspec-mtu", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_sec_path_sec_path_config_tspec_mtu must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-sec-path-sec-path-config-tspec-mtu", rest_name="lsp-sec-path-sec-path-config-tspec-mtu", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)""",
})
self.__lsp_sec_path_sec_path_config_tspec_mtu = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_sec_path_sec_path_config_tspec_mtu(self):
self.__lsp_sec_path_sec_path_config_tspec_mtu = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-sec-path-sec-path-config-tspec-mtu", rest_name="lsp-sec-path-sec-path-config-tspec-mtu", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
def _get_lsp_sec_path_config_cos_configured(self):
"""
Getter method for lsp_sec_path_config_cos_configured, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_cos_configured (boolean)
YANG Description: LSP cos value configured
"""
return self.__lsp_sec_path_config_cos_configured
def _set_lsp_sec_path_config_cos_configured(self, v, load=False):
"""
Setter method for lsp_sec_path_config_cos_configured, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_cos_configured (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_sec_path_config_cos_configured is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_sec_path_config_cos_configured() directly.
YANG Description: LSP cos value configured
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-cos-configured", rest_name="lsp-sec-path-config-cos-configured", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_sec_path_config_cos_configured must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-cos-configured", rest_name="lsp-sec-path-config-cos-configured", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)""",
})
self.__lsp_sec_path_config_cos_configured = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_sec_path_config_cos_configured(self):
self.__lsp_sec_path_config_cos_configured = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-cos-configured", rest_name="lsp-sec-path-config-cos-configured", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
def _get_lsp_sec_path_config_cos(self):
"""
Getter method for lsp_sec_path_config_cos, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_cos (uint8)
YANG Description: LSP cos value
"""
return self.__lsp_sec_path_config_cos
def _set_lsp_sec_path_config_cos(self, v, load=False):
"""
Setter method for lsp_sec_path_config_cos, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_cos (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_sec_path_config_cos is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_sec_path_config_cos() directly.
YANG Description: LSP cos value
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="lsp-sec-path-config-cos", rest_name="lsp-sec-path-config-cos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint8', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_sec_path_config_cos must be of a type compatible with uint8""",
'defined-type': "uint8",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="lsp-sec-path-config-cos", rest_name="lsp-sec-path-config-cos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint8', is_config=True)""",
})
self.__lsp_sec_path_config_cos = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_sec_path_config_cos(self):
self.__lsp_sec_path_config_cos = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="lsp-sec-path-config-cos", rest_name="lsp-sec-path-config-cos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint8', is_config=True)
def _get_lsp_sec_path_config_mtu_configured(self):
"""
Getter method for lsp_sec_path_config_mtu_configured, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_mtu_configured (boolean)
YANG Description: LSP MTU value configured
"""
return self.__lsp_sec_path_config_mtu_configured
def _set_lsp_sec_path_config_mtu_configured(self, v, load=False):
"""
Setter method for lsp_sec_path_config_mtu_configured, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_mtu_configured (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_sec_path_config_mtu_configured is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_sec_path_config_mtu_configured() directly.
YANG Description: LSP MTU value configured
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-mtu-configured", rest_name="lsp-sec-path-config-mtu-configured", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_sec_path_config_mtu_configured must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-mtu-configured", rest_name="lsp-sec-path-config-mtu-configured", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)""",
})
self.__lsp_sec_path_config_mtu_configured = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_sec_path_config_mtu_configured(self):
self.__lsp_sec_path_config_mtu_configured = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-mtu-configured", rest_name="lsp-sec-path-config-mtu-configured", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
def _get_lsp_sec_path_config_mtu(self):
"""
Getter method for lsp_sec_path_config_mtu, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_mtu (uint32)
YANG Description: LSP MTU value
"""
return self.__lsp_sec_path_config_mtu
def _set_lsp_sec_path_config_mtu(self, v, load=False):
"""
Setter method for lsp_sec_path_config_mtu, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_mtu (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_sec_path_config_mtu is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_sec_path_config_mtu() directly.
YANG Description: LSP MTU value
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-sec-path-config-mtu", rest_name="lsp-sec-path-config-mtu", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_sec_path_config_mtu must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-sec-path-config-mtu", rest_name="lsp-sec-path-config-mtu", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)""",
})
self.__lsp_sec_path_config_mtu = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_sec_path_config_mtu(self):
self.__lsp_sec_path_config_mtu = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-sec-path-config-mtu", rest_name="lsp-sec-path-config-mtu", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
def _get_lsp_sec_path_config_tie_breaking_configured(self):
"""
Getter method for lsp_sec_path_config_tie_breaking_configured, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_tie_breaking_configured (boolean)
YANG Description: LSP CSPF tie-breaking configured
"""
return self.__lsp_sec_path_config_tie_breaking_configured
def _set_lsp_sec_path_config_tie_breaking_configured(self, v, load=False):
"""
Setter method for lsp_sec_path_config_tie_breaking_configured, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_tie_breaking_configured (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_sec_path_config_tie_breaking_configured is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_sec_path_config_tie_breaking_configured() directly.
YANG Description: LSP CSPF tie-breaking configured
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-tie-breaking-configured", rest_name="lsp-sec-path-config-tie-breaking-configured", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_sec_path_config_tie_breaking_configured must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-tie-breaking-configured", rest_name="lsp-sec-path-config-tie-breaking-configured", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)""",
})
self.__lsp_sec_path_config_tie_breaking_configured = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_sec_path_config_tie_breaking_configured(self):
self.__lsp_sec_path_config_tie_breaking_configured = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-tie-breaking-configured", rest_name="lsp-sec-path-config-tie-breaking-configured", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
def _get_lsp_sec_path_config_tie_break_random(self):
"""
Getter method for lsp_sec_path_config_tie_break_random, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_tie_break_random (boolean)
YANG Description: LSP cspf tie braking is random
"""
return self.__lsp_sec_path_config_tie_break_random
def _set_lsp_sec_path_config_tie_break_random(self, v, load=False):
"""
Setter method for lsp_sec_path_config_tie_break_random, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_tie_break_random (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_sec_path_config_tie_break_random is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_sec_path_config_tie_break_random() directly.
YANG Description: LSP cspf tie braking is random
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-tie-break-random", rest_name="lsp-sec-path-config-tie-break-random", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_sec_path_config_tie_break_random must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-tie-break-random", rest_name="lsp-sec-path-config-tie-break-random", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)""",
})
self.__lsp_sec_path_config_tie_break_random = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_sec_path_config_tie_break_random(self):
self.__lsp_sec_path_config_tie_break_random = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-tie-break-random", rest_name="lsp-sec-path-config-tie-break-random", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
def _get_lsp_sec_path_config_tie_break_least_fill(self):
"""
Getter method for lsp_sec_path_config_tie_break_least_fill, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_tie_break_least_fill (boolean)
YANG Description: LSP cspf tie braking is least fill
"""
return self.__lsp_sec_path_config_tie_break_least_fill
def _set_lsp_sec_path_config_tie_break_least_fill(self, v, load=False):
"""
Setter method for lsp_sec_path_config_tie_break_least_fill, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_tie_break_least_fill (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_sec_path_config_tie_break_least_fill is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_sec_path_config_tie_break_least_fill() directly.
YANG Description: LSP cspf tie braking is least fill
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-tie-break-least-fill", rest_name="lsp-sec-path-config-tie-break-least-fill", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_sec_path_config_tie_break_least_fill must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-tie-break-least-fill", rest_name="lsp-sec-path-config-tie-break-least-fill", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)""",
})
self.__lsp_sec_path_config_tie_break_least_fill = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_sec_path_config_tie_break_least_fill(self):
self.__lsp_sec_path_config_tie_break_least_fill = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-tie-break-least-fill", rest_name="lsp-sec-path-config-tie-break-least-fill", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
def _get_lsp_sec_path_config_tie_break_most_fill(self):
"""
Getter method for lsp_sec_path_config_tie_break_most_fill, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_tie_break_most_fill (boolean)
YANG Description: LSP cspf tie braking is most-fill
"""
return self.__lsp_sec_path_config_tie_break_most_fill
def _set_lsp_sec_path_config_tie_break_most_fill(self, v, load=False):
"""
Setter method for lsp_sec_path_config_tie_break_most_fill, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_tie_break_most_fill (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_sec_path_config_tie_break_most_fill is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_sec_path_config_tie_break_most_fill() directly.
YANG Description: LSP cspf tie braking is most-fill
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-tie-break-most-fill", rest_name="lsp-sec-path-config-tie-break-most-fill", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_sec_path_config_tie_break_most_fill must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-tie-break-most-fill", rest_name="lsp-sec-path-config-tie-break-most-fill", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)""",
})
self.__lsp_sec_path_config_tie_break_most_fill = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_sec_path_config_tie_break_most_fill(self):
self.__lsp_sec_path_config_tie_break_most_fill = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-tie-break-most-fill", rest_name="lsp-sec-path-config-tie-break-most-fill", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
def _get_lsp_sec_path_config_cspf_disabled(self):
"""
Getter method for lsp_sec_path_config_cspf_disabled, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_cspf_disabled (boolean)
YANG Description: LSP cspf disabled
"""
return self.__lsp_sec_path_config_cspf_disabled
def _set_lsp_sec_path_config_cspf_disabled(self, v, load=False):
"""
Setter method for lsp_sec_path_config_cspf_disabled, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_cspf_disabled (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_sec_path_config_cspf_disabled is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_sec_path_config_cspf_disabled() directly.
YANG Description: LSP cspf disabled
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-cspf-disabled", rest_name="lsp-sec-path-config-cspf-disabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_sec_path_config_cspf_disabled must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-cspf-disabled", rest_name="lsp-sec-path-config-cspf-disabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)""",
})
self.__lsp_sec_path_config_cspf_disabled = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_sec_path_config_cspf_disabled(self):
self.__lsp_sec_path_config_cspf_disabled = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-cspf-disabled", rest_name="lsp-sec-path-config-cspf-disabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
  # NOTE(review): generated pyangbind get/set/unset triplet for the boolean
  # YANG leaf 'lsp-sec-path-config-hot-standby' -- do not hand-edit; regenerate
  # from the brocade-mpls YANG model instead.
  def _get_lsp_sec_path_config_hot_standby(self):
    """
    Getter method for lsp_sec_path_config_hot_standby, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_hot_standby (boolean)
    YANG Description: LSP is hot standby
    """
    return self.__lsp_sec_path_config_hot_standby
  def _set_lsp_sec_path_config_hot_standby(self, v, load=False):
    """
    Setter method for lsp_sec_path_config_hot_standby, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_hot_standby (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_lsp_sec_path_config_hot_standby is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_lsp_sec_path_config_hot_standby() directly.
    YANG Description: LSP is hot standby
    """
    # Coerce through the value's declared union type (if any) before wrapping.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-hot-standby", rest_name="lsp-sec-path-config-hot-standby", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
    except (TypeError, ValueError):
      # The 'generated-type' text intentionally mirrors the constructor call;
      # it is part of pyangbind's error contract.
      raise ValueError({
          'error-string': """lsp_sec_path_config_hot_standby must be of a type compatible with boolean""",
          'defined-type': "boolean",
          'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-hot-standby", rest_name="lsp-sec-path-config-hot-standby", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)""",
        })
    self.__lsp_sec_path_config_hot_standby = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_lsp_sec_path_config_hot_standby(self):
    # Reset the leaf to a fresh (unset) YANGDynClass boolean instance.
    self.__lsp_sec_path_config_hot_standby = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-hot-standby", rest_name="lsp-sec-path-config-hot-standby", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
  # NOTE(review): generated pyangbind get/set/unset triplet for the boolean
  # YANG leaf 'lsp-sec-path-config-pinned' -- do not hand-edit; regenerate
  # from the brocade-mpls YANG model instead.
  def _get_lsp_sec_path_config_pinned(self):
    """
    Getter method for lsp_sec_path_config_pinned, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_pinned (boolean)
    YANG Description: LSP is pinned
    """
    return self.__lsp_sec_path_config_pinned
  def _set_lsp_sec_path_config_pinned(self, v, load=False):
    """
    Setter method for lsp_sec_path_config_pinned, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_pinned (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_lsp_sec_path_config_pinned is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_lsp_sec_path_config_pinned() directly.
    YANG Description: LSP is pinned
    """
    # Coerce through the value's declared union type (if any) before wrapping.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-pinned", rest_name="lsp-sec-path-config-pinned", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
    except (TypeError, ValueError):
      # The 'generated-type' text intentionally mirrors the constructor call;
      # it is part of pyangbind's error contract.
      raise ValueError({
          'error-string': """lsp_sec_path_config_pinned must be of a type compatible with boolean""",
          'defined-type': "boolean",
          'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-pinned", rest_name="lsp-sec-path-config-pinned", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)""",
        })
    self.__lsp_sec_path_config_pinned = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_lsp_sec_path_config_pinned(self):
    # Reset the leaf to a fresh (unset) YANGDynClass boolean instance.
    self.__lsp_sec_path_config_pinned = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-pinned", rest_name="lsp-sec-path-config-pinned", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
  # NOTE(review): generated pyangbind get/set/unset triplet for the boolean
  # YANG leaf 'lsp-sec-path-config-persistent' -- do not hand-edit; regenerate
  # from the brocade-mpls YANG model instead.
  def _get_lsp_sec_path_config_persistent(self):
    """
    Getter method for lsp_sec_path_config_persistent, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_persistent (boolean)
    YANG Description: LSP is persistent
    """
    return self.__lsp_sec_path_config_persistent
  def _set_lsp_sec_path_config_persistent(self, v, load=False):
    """
    Setter method for lsp_sec_path_config_persistent, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_persistent (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_lsp_sec_path_config_persistent is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_lsp_sec_path_config_persistent() directly.
    YANG Description: LSP is persistent
    """
    # Coerce through the value's declared union type (if any) before wrapping.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-persistent", rest_name="lsp-sec-path-config-persistent", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
    except (TypeError, ValueError):
      # The 'generated-type' text intentionally mirrors the constructor call;
      # it is part of pyangbind's error contract.
      raise ValueError({
          'error-string': """lsp_sec_path_config_persistent must be of a type compatible with boolean""",
          'defined-type': "boolean",
          'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-persistent", rest_name="lsp-sec-path-config-persistent", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)""",
        })
    self.__lsp_sec_path_config_persistent = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_lsp_sec_path_config_persistent(self):
    # Reset the leaf to a fresh (unset) YANGDynClass boolean instance.
    self.__lsp_sec_path_config_persistent = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-persistent", rest_name="lsp-sec-path-config-persistent", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
  # NOTE(review): generated pyangbind get/set/unset triplet for the boolean
  # YANG leaf 'lsp-sec-path-config-soft-prempt' ('prempt' spelling comes from
  # the YANG model) -- do not hand-edit; regenerate from brocade-mpls instead.
  def _get_lsp_sec_path_config_soft_prempt(self):
    """
    Getter method for lsp_sec_path_config_soft_prempt, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_soft_prempt (boolean)
    YANG Description: LSP soft preemption enabled
    """
    return self.__lsp_sec_path_config_soft_prempt
  def _set_lsp_sec_path_config_soft_prempt(self, v, load=False):
    """
    Setter method for lsp_sec_path_config_soft_prempt, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_soft_prempt (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_lsp_sec_path_config_soft_prempt is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_lsp_sec_path_config_soft_prempt() directly.
    YANG Description: LSP soft preemption enabled
    """
    # Coerce through the value's declared union type (if any) before wrapping.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-soft-prempt", rest_name="lsp-sec-path-config-soft-prempt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
    except (TypeError, ValueError):
      # The 'generated-type' text intentionally mirrors the constructor call;
      # it is part of pyangbind's error contract.
      raise ValueError({
          'error-string': """lsp_sec_path_config_soft_prempt must be of a type compatible with boolean""",
          'defined-type': "boolean",
          'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-soft-prempt", rest_name="lsp-sec-path-config-soft-prempt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)""",
        })
    self.__lsp_sec_path_config_soft_prempt = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_lsp_sec_path_config_soft_prempt(self):
    # Reset the leaf to a fresh (unset) YANGDynClass boolean instance.
    self.__lsp_sec_path_config_soft_prempt = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-soft-prempt", rest_name="lsp-sec-path-config-soft-prempt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
  # NOTE(review): generated pyangbind get/set/unset triplet for the boolean
  # YANG leaf 'lsp-sec-path-config-priority-configured' -- do not hand-edit;
  # regenerate from the brocade-mpls YANG model instead.
  def _get_lsp_sec_path_config_priority_configured(self):
    """
    Getter method for lsp_sec_path_config_priority_configured, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_priority_configured (boolean)
    YANG Description: LSP priority configured
    """
    return self.__lsp_sec_path_config_priority_configured
  def _set_lsp_sec_path_config_priority_configured(self, v, load=False):
    """
    Setter method for lsp_sec_path_config_priority_configured, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_priority_configured (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_lsp_sec_path_config_priority_configured is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_lsp_sec_path_config_priority_configured() directly.
    YANG Description: LSP priority configured
    """
    # Coerce through the value's declared union type (if any) before wrapping.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-priority-configured", rest_name="lsp-sec-path-config-priority-configured", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
    except (TypeError, ValueError):
      # The 'generated-type' text intentionally mirrors the constructor call;
      # it is part of pyangbind's error contract.
      raise ValueError({
          'error-string': """lsp_sec_path_config_priority_configured must be of a type compatible with boolean""",
          'defined-type': "boolean",
          'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-priority-configured", rest_name="lsp-sec-path-config-priority-configured", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)""",
        })
    self.__lsp_sec_path_config_priority_configured = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_lsp_sec_path_config_priority_configured(self):
    # Reset the leaf to a fresh (unset) YANGDynClass boolean instance.
    self.__lsp_sec_path_config_priority_configured = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-priority-configured", rest_name="lsp-sec-path-config-priority-configured", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
  # NOTE(review): generated pyangbind get/set/unset triplet for the uint8
  # YANG leaf 'lsp-sec-path-config-setup-prority' ('prority' spelling comes
  # from the YANG model; renaming would break callers) -- do not hand-edit.
  def _get_lsp_sec_path_config_setup_prority(self):
    """
    Getter method for lsp_sec_path_config_setup_prority, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_setup_prority (uint8)
    YANG Description: LSP setup priority
    """
    return self.__lsp_sec_path_config_setup_prority
  def _set_lsp_sec_path_config_setup_prority(self, v, load=False):
    """
    Setter method for lsp_sec_path_config_setup_prority, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_setup_prority (uint8)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_lsp_sec_path_config_setup_prority is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_lsp_sec_path_config_setup_prority() directly.
    YANG Description: LSP setup priority
    """
    # Coerce through the value's declared union type (if any) before wrapping;
    # the RestrictedClassType enforces the uint8 range 0..255.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="lsp-sec-path-config-setup-prority", rest_name="lsp-sec-path-config-setup-prority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint8', is_config=True)
    except (TypeError, ValueError):
      # The 'generated-type' text intentionally mirrors the constructor call;
      # it is part of pyangbind's error contract.
      raise ValueError({
          'error-string': """lsp_sec_path_config_setup_prority must be of a type compatible with uint8""",
          'defined-type': "uint8",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="lsp-sec-path-config-setup-prority", rest_name="lsp-sec-path-config-setup-prority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint8', is_config=True)""",
        })
    self.__lsp_sec_path_config_setup_prority = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_lsp_sec_path_config_setup_prority(self):
    # Reset the leaf to a fresh (unset) YANGDynClass uint8 instance.
    self.__lsp_sec_path_config_setup_prority = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="lsp-sec-path-config-setup-prority", rest_name="lsp-sec-path-config-setup-prority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint8', is_config=True)
  # NOTE(review): generated pyangbind get/set/unset triplet for the uint8
  # YANG leaf 'lsp-sec-path-config-holding-prority' ('prority' spelling comes
  # from the YANG model; renaming would break callers) -- do not hand-edit.
  def _get_lsp_sec_path_config_holding_prority(self):
    """
    Getter method for lsp_sec_path_config_holding_prority, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_holding_prority (uint8)
    YANG Description: LSP holding priority
    """
    return self.__lsp_sec_path_config_holding_prority
  def _set_lsp_sec_path_config_holding_prority(self, v, load=False):
    """
    Setter method for lsp_sec_path_config_holding_prority, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_holding_prority (uint8)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_lsp_sec_path_config_holding_prority is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_lsp_sec_path_config_holding_prority() directly.
    YANG Description: LSP holding priority
    """
    # Coerce through the value's declared union type (if any) before wrapping;
    # the RestrictedClassType enforces the uint8 range 0..255.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="lsp-sec-path-config-holding-prority", rest_name="lsp-sec-path-config-holding-prority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint8', is_config=True)
    except (TypeError, ValueError):
      # The 'generated-type' text intentionally mirrors the constructor call;
      # it is part of pyangbind's error contract.
      raise ValueError({
          'error-string': """lsp_sec_path_config_holding_prority must be of a type compatible with uint8""",
          'defined-type': "uint8",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="lsp-sec-path-config-holding-prority", rest_name="lsp-sec-path-config-holding-prority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint8', is_config=True)""",
        })
    self.__lsp_sec_path_config_holding_prority = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_lsp_sec_path_config_holding_prority(self):
    # Reset the leaf to a fresh (unset) YANGDynClass uint8 instance.
    self.__lsp_sec_path_config_holding_prority = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="lsp-sec-path-config-holding-prority", rest_name="lsp-sec-path-config-holding-prority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint8', is_config=True)
  # NOTE(review): generated pyangbind get/set/unset triplet for the boolean
  # YANG leaf 'lsp-sec-path-config-hop-limit-configured' -- do not hand-edit;
  # regenerate from the brocade-mpls YANG model instead.
  def _get_lsp_sec_path_config_hop_limit_configured(self):
    """
    Getter method for lsp_sec_path_config_hop_limit_configured, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_hop_limit_configured (boolean)
    YANG Description: LSP hop limit is configured
    """
    return self.__lsp_sec_path_config_hop_limit_configured
  def _set_lsp_sec_path_config_hop_limit_configured(self, v, load=False):
    """
    Setter method for lsp_sec_path_config_hop_limit_configured, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_hop_limit_configured (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_lsp_sec_path_config_hop_limit_configured is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_lsp_sec_path_config_hop_limit_configured() directly.
    YANG Description: LSP hop limit is configured
    """
    # Coerce through the value's declared union type (if any) before wrapping.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-hop-limit-configured", rest_name="lsp-sec-path-config-hop-limit-configured", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
    except (TypeError, ValueError):
      # The 'generated-type' text intentionally mirrors the constructor call;
      # it is part of pyangbind's error contract.
      raise ValueError({
          'error-string': """lsp_sec_path_config_hop_limit_configured must be of a type compatible with boolean""",
          'defined-type': "boolean",
          'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-hop-limit-configured", rest_name="lsp-sec-path-config-hop-limit-configured", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)""",
        })
    self.__lsp_sec_path_config_hop_limit_configured = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_lsp_sec_path_config_hop_limit_configured(self):
    # Reset the leaf to a fresh (unset) YANGDynClass boolean instance.
    self.__lsp_sec_path_config_hop_limit_configured = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-hop-limit-configured", rest_name="lsp-sec-path-config-hop-limit-configured", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
  # NOTE(review): generated pyangbind get/set/unset triplet for the uint8
  # YANG leaf 'lsp-sec-path-config-hop-limit' -- do not hand-edit; regenerate
  # from the brocade-mpls YANG model instead.
  def _get_lsp_sec_path_config_hop_limit(self):
    """
    Getter method for lsp_sec_path_config_hop_limit, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_hop_limit (uint8)
    YANG Description: LSP hop limit
    """
    return self.__lsp_sec_path_config_hop_limit
  def _set_lsp_sec_path_config_hop_limit(self, v, load=False):
    """
    Setter method for lsp_sec_path_config_hop_limit, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_hop_limit (uint8)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_lsp_sec_path_config_hop_limit is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_lsp_sec_path_config_hop_limit() directly.
    YANG Description: LSP hop limit
    """
    # Coerce through the value's declared union type (if any) before wrapping;
    # the RestrictedClassType enforces the uint8 range 0..255.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="lsp-sec-path-config-hop-limit", rest_name="lsp-sec-path-config-hop-limit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint8', is_config=True)
    except (TypeError, ValueError):
      # The 'generated-type' text intentionally mirrors the constructor call;
      # it is part of pyangbind's error contract.
      raise ValueError({
          'error-string': """lsp_sec_path_config_hop_limit must be of a type compatible with uint8""",
          'defined-type': "uint8",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="lsp-sec-path-config-hop-limit", rest_name="lsp-sec-path-config-hop-limit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint8', is_config=True)""",
        })
    self.__lsp_sec_path_config_hop_limit = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_lsp_sec_path_config_hop_limit(self):
    # Reset the leaf to a fresh (unset) YANGDynClass uint8 instance.
    self.__lsp_sec_path_config_hop_limit = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="lsp-sec-path-config-hop-limit", rest_name="lsp-sec-path-config-hop-limit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint8', is_config=True)
  # NOTE(review): generated pyangbind get/set/unset triplet for the boolean
  # YANG leaf 'lsp-sec-path-config-traffic-eng-rate-configured' -- do not
  # hand-edit; regenerate from the brocade-mpls YANG model instead.
  def _get_lsp_sec_path_config_traffic_eng_rate_configured(self):
    """
    Getter method for lsp_sec_path_config_traffic_eng_rate_configured, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_traffic_eng_rate_configured (boolean)
    YANG Description: LSP traffic engineering rates configured
    """
    return self.__lsp_sec_path_config_traffic_eng_rate_configured
  def _set_lsp_sec_path_config_traffic_eng_rate_configured(self, v, load=False):
    """
    Setter method for lsp_sec_path_config_traffic_eng_rate_configured, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_traffic_eng_rate_configured (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_lsp_sec_path_config_traffic_eng_rate_configured is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_lsp_sec_path_config_traffic_eng_rate_configured() directly.
    YANG Description: LSP traffic engineering rates configured
    """
    # Coerce through the value's declared union type (if any) before wrapping.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-traffic-eng-rate-configured", rest_name="lsp-sec-path-config-traffic-eng-rate-configured", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
    except (TypeError, ValueError):
      # The 'generated-type' text intentionally mirrors the constructor call;
      # it is part of pyangbind's error contract.
      raise ValueError({
          'error-string': """lsp_sec_path_config_traffic_eng_rate_configured must be of a type compatible with boolean""",
          'defined-type': "boolean",
          'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-traffic-eng-rate-configured", rest_name="lsp-sec-path-config-traffic-eng-rate-configured", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)""",
        })
    self.__lsp_sec_path_config_traffic_eng_rate_configured = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_lsp_sec_path_config_traffic_eng_rate_configured(self):
    # Reset the leaf to a fresh (unset) YANGDynClass boolean instance.
    self.__lsp_sec_path_config_traffic_eng_rate_configured = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-traffic-eng-rate-configured", rest_name="lsp-sec-path-config-traffic-eng-rate-configured", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
  # NOTE(review): generated pyangbind get/set/unset triplet for the uint32
  # YANG leaf 'lsp-sec-path-config-traffic-eng-mean-rate' (Python 2 'long'
  # base type) -- do not hand-edit; regenerate from brocade-mpls instead.
  def _get_lsp_sec_path_config_traffic_eng_mean_rate(self):
    """
    Getter method for lsp_sec_path_config_traffic_eng_mean_rate, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_traffic_eng_mean_rate (uint32)
    YANG Description: LSP traffic engineering mean rate
    """
    return self.__lsp_sec_path_config_traffic_eng_mean_rate
  def _set_lsp_sec_path_config_traffic_eng_mean_rate(self, v, load=False):
    """
    Setter method for lsp_sec_path_config_traffic_eng_mean_rate, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_traffic_eng_mean_rate (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_lsp_sec_path_config_traffic_eng_mean_rate is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_lsp_sec_path_config_traffic_eng_mean_rate() directly.
    YANG Description: LSP traffic engineering mean rate
    """
    # Coerce through the value's declared union type (if any) before wrapping;
    # the RestrictedClassType enforces the uint32 range 0..4294967295.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-sec-path-config-traffic-eng-mean-rate", rest_name="lsp-sec-path-config-traffic-eng-mean-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
    except (TypeError, ValueError):
      # The 'generated-type' text intentionally mirrors the constructor call;
      # it is part of pyangbind's error contract.
      raise ValueError({
          'error-string': """lsp_sec_path_config_traffic_eng_mean_rate must be of a type compatible with uint32""",
          'defined-type': "uint32",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-sec-path-config-traffic-eng-mean-rate", rest_name="lsp-sec-path-config-traffic-eng-mean-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)""",
        })
    self.__lsp_sec_path_config_traffic_eng_mean_rate = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_lsp_sec_path_config_traffic_eng_mean_rate(self):
    # Reset the leaf to a fresh (unset) YANGDynClass uint32 instance.
    self.__lsp_sec_path_config_traffic_eng_mean_rate = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-sec-path-config-traffic-eng-mean-rate", rest_name="lsp-sec-path-config-traffic-eng-mean-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
  # NOTE(review): generated pyangbind get/set/unset triplet for the uint32
  # YANG leaf 'lsp-sec-path-config-traffic-eng-max-rate' (Python 2 'long'
  # base type) -- do not hand-edit; regenerate from brocade-mpls instead.
  def _get_lsp_sec_path_config_traffic_eng_max_rate(self):
    """
    Getter method for lsp_sec_path_config_traffic_eng_max_rate, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_traffic_eng_max_rate (uint32)
    YANG Description: LSP traffic engineering max rate
    """
    return self.__lsp_sec_path_config_traffic_eng_max_rate
  def _set_lsp_sec_path_config_traffic_eng_max_rate(self, v, load=False):
    """
    Setter method for lsp_sec_path_config_traffic_eng_max_rate, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_traffic_eng_max_rate (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_lsp_sec_path_config_traffic_eng_max_rate is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_lsp_sec_path_config_traffic_eng_max_rate() directly.
    YANG Description: LSP traffic engineering max rate
    """
    # Coerce through the value's declared union type (if any) before wrapping;
    # the RestrictedClassType enforces the uint32 range 0..4294967295.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-sec-path-config-traffic-eng-max-rate", rest_name="lsp-sec-path-config-traffic-eng-max-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
    except (TypeError, ValueError):
      # The 'generated-type' text intentionally mirrors the constructor call;
      # it is part of pyangbind's error contract.
      raise ValueError({
          'error-string': """lsp_sec_path_config_traffic_eng_max_rate must be of a type compatible with uint32""",
          'defined-type': "uint32",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-sec-path-config-traffic-eng-max-rate", rest_name="lsp-sec-path-config-traffic-eng-max-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)""",
        })
    self.__lsp_sec_path_config_traffic_eng_max_rate = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_lsp_sec_path_config_traffic_eng_max_rate(self):
    # Reset the leaf to a fresh (unset) YANGDynClass uint32 instance.
    self.__lsp_sec_path_config_traffic_eng_max_rate = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-sec-path-config-traffic-eng-max-rate", rest_name="lsp-sec-path-config-traffic-eng-max-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
def _get_lsp_sec_path_config_traffic_eng_max_burst(self):
"""
Getter method for lsp_sec_path_config_traffic_eng_max_burst, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_traffic_eng_max_burst (uint32)
YANG Description: LSP traffic engineering max-burst
"""
return self.__lsp_sec_path_config_traffic_eng_max_burst
  def _set_lsp_sec_path_config_traffic_eng_max_burst(self, v, load=False):
    """
    Setter method for lsp_sec_path_config_traffic_eng_max_burst, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_traffic_eng_max_burst (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_lsp_sec_path_config_traffic_eng_max_burst is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_lsp_sec_path_config_traffic_eng_max_burst() directly.
    YANG Description: LSP traffic engineering max-burst
    """
    # Unwrap values that carry their own YANG type converter.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    # Coerce v into the generated uint32 leaf type; incompatible values are
    # re-raised as a structured ValueError (pyangbind convention).
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-sec-path-config-traffic-eng-max-burst", rest_name="lsp-sec-path-config-traffic-eng-max-burst", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """lsp_sec_path_config_traffic_eng_max_burst must be of a type compatible with uint32""",
          'defined-type': "uint32",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-sec-path-config-traffic-eng-max-burst", rest_name="lsp-sec-path-config-traffic-eng-max-burst", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)""",
        })
    self.__lsp_sec_path_config_traffic_eng_max_burst = t
    # Notify the parent tree (if attached) that this node changed.
    if hasattr(self, '_set'):
      self._set()
  def _unset_lsp_sec_path_config_traffic_eng_max_burst(self):
    # Reset the leaf to a fresh default-valued YANGDynClass instance.
    # NOTE: pyangbind-generated code; do not edit by hand.
    self.__lsp_sec_path_config_traffic_eng_max_burst = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-sec-path-config-traffic-eng-max-burst", rest_name="lsp-sec-path-config-traffic-eng-max-burst", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
  def _get_lsp_sec_path_config_admin_group_configured(self):
    """
    Getter method for lsp_sec_path_config_admin_group_configured, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_admin_group_configured (boolean)
    YANG Description: LSP secondary path admin group configured
    """
    # Return the cached YANGBool leaf (pyangbind-generated accessor).
    return self.__lsp_sec_path_config_admin_group_configured
  def _set_lsp_sec_path_config_admin_group_configured(self, v, load=False):
    """
    Setter method for lsp_sec_path_config_admin_group_configured, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_admin_group_configured (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_lsp_sec_path_config_admin_group_configured is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_lsp_sec_path_config_admin_group_configured() directly.
    YANG Description: LSP secondary path admin group configured
    """
    # Unwrap values that carry their own YANG type converter.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    # Coerce v into the generated boolean leaf type; incompatible values are
    # re-raised as a structured ValueError (pyangbind convention).
    try:
      t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-admin-group-configured", rest_name="lsp-sec-path-config-admin-group-configured", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """lsp_sec_path_config_admin_group_configured must be of a type compatible with boolean""",
          'defined-type': "boolean",
          'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-admin-group-configured", rest_name="lsp-sec-path-config-admin-group-configured", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)""",
        })
    self.__lsp_sec_path_config_admin_group_configured = t
    # Notify the parent tree (if attached) that this node changed.
    if hasattr(self, '_set'):
      self._set()
  def _unset_lsp_sec_path_config_admin_group_configured(self):
    # Reset the leaf to a fresh default-valued YANGDynClass instance.
    # NOTE: pyangbind-generated code; do not edit by hand.
    self.__lsp_sec_path_config_admin_group_configured = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-sec-path-config-admin-group-configured", rest_name="lsp-sec-path-config-admin-group-configured", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)
  def _get_lsp_sec_path_config_admin_groups(self):
    """
    Getter method for lsp_sec_path_config_admin_groups, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_admin_groups (container)
    """
    # Return the cached container node (pyangbind-generated accessor).
    return self.__lsp_sec_path_config_admin_groups
  def _set_lsp_sec_path_config_admin_groups(self, v, load=False):
    """
    Setter method for lsp_sec_path_config_admin_groups, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_admin_groups (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_lsp_sec_path_config_admin_groups is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_lsp_sec_path_config_admin_groups() directly.
    """
    # Unwrap values that carry their own YANG type converter.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    # Coerce v into the generated container type; incompatible values are
    # re-raised as a structured ValueError (pyangbind convention).
    try:
      t = YANGDynClass(v,base=lsp_sec_path_config_admin_groups.lsp_sec_path_config_admin_groups, is_container='container', presence=False, yang_name="lsp-sec-path-config-admin-groups", rest_name="lsp-sec-path-config-admin-groups", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """lsp_sec_path_config_admin_groups must be of a type compatible with container""",
          'defined-type': "container",
          'generated-type': """YANGDynClass(base=lsp_sec_path_config_admin_groups.lsp_sec_path_config_admin_groups, is_container='container', presence=False, yang_name="lsp-sec-path-config-admin-groups", rest_name="lsp-sec-path-config-admin-groups", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)""",
        })
    self.__lsp_sec_path_config_admin_groups = t
    # Notify the parent tree (if attached) that this node changed.
    if hasattr(self, '_set'):
      self._set()
  def _unset_lsp_sec_path_config_admin_groups(self):
    # Reset the container to a fresh default-valued YANGDynClass instance.
    # NOTE: pyangbind-generated code; do not edit by hand.
    self.__lsp_sec_path_config_admin_groups = YANGDynClass(base=lsp_sec_path_config_admin_groups.lsp_sec_path_config_admin_groups, is_container='container', presence=False, yang_name="lsp-sec-path-config-admin-groups", rest_name="lsp-sec-path-config-admin-groups", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)
lsp_sec_path_path_name = __builtin__.property(_get_lsp_sec_path_path_name, _set_lsp_sec_path_path_name)
lsp_sec_path_state = __builtin__.property(_get_lsp_sec_path_state, _set_lsp_sec_path_state)
lsp_sec_path_state_up = __builtin__.property(_get_lsp_sec_path_state_up, _set_lsp_sec_path_state_up)
lsp_sec_path_active = __builtin__.property(_get_lsp_sec_path_active, _set_lsp_sec_path_active)
lsp_sec_path_is_current_secondary = __builtin__.property(_get_lsp_sec_path_is_current_secondary, _set_lsp_sec_path_is_current_secondary)
lsp_sec_path_is_selected_secondary = __builtin__.property(_get_lsp_sec_path_is_selected_secondary, _set_lsp_sec_path_is_selected_secondary)
lsp_sec_path_config_reoptimize_timer_configured = __builtin__.property(_get_lsp_sec_path_config_reoptimize_timer_configured, _set_lsp_sec_path_config_reoptimize_timer_configured)
lsp_sec_path_config_reoptimize_timer = __builtin__.property(_get_lsp_sec_path_config_reoptimize_timer, _set_lsp_sec_path_config_reoptimize_timer)
lsp_sec_path_config_tspec_mtu_configured = __builtin__.property(_get_lsp_sec_path_config_tspec_mtu_configured, _set_lsp_sec_path_config_tspec_mtu_configured)
lsp_sec_path_sec_path_config_tspec_mtu = __builtin__.property(_get_lsp_sec_path_sec_path_config_tspec_mtu, _set_lsp_sec_path_sec_path_config_tspec_mtu)
lsp_sec_path_config_cos_configured = __builtin__.property(_get_lsp_sec_path_config_cos_configured, _set_lsp_sec_path_config_cos_configured)
lsp_sec_path_config_cos = __builtin__.property(_get_lsp_sec_path_config_cos, _set_lsp_sec_path_config_cos)
lsp_sec_path_config_mtu_configured = __builtin__.property(_get_lsp_sec_path_config_mtu_configured, _set_lsp_sec_path_config_mtu_configured)
lsp_sec_path_config_mtu = __builtin__.property(_get_lsp_sec_path_config_mtu, _set_lsp_sec_path_config_mtu)
lsp_sec_path_config_tie_breaking_configured = __builtin__.property(_get_lsp_sec_path_config_tie_breaking_configured, _set_lsp_sec_path_config_tie_breaking_configured)
lsp_sec_path_config_tie_break_random = __builtin__.property(_get_lsp_sec_path_config_tie_break_random, _set_lsp_sec_path_config_tie_break_random)
lsp_sec_path_config_tie_break_least_fill = __builtin__.property(_get_lsp_sec_path_config_tie_break_least_fill, _set_lsp_sec_path_config_tie_break_least_fill)
lsp_sec_path_config_tie_break_most_fill = __builtin__.property(_get_lsp_sec_path_config_tie_break_most_fill, _set_lsp_sec_path_config_tie_break_most_fill)
lsp_sec_path_config_cspf_disabled = __builtin__.property(_get_lsp_sec_path_config_cspf_disabled, _set_lsp_sec_path_config_cspf_disabled)
lsp_sec_path_config_hot_standby = __builtin__.property(_get_lsp_sec_path_config_hot_standby, _set_lsp_sec_path_config_hot_standby)
lsp_sec_path_config_pinned = __builtin__.property(_get_lsp_sec_path_config_pinned, _set_lsp_sec_path_config_pinned)
lsp_sec_path_config_persistent = __builtin__.property(_get_lsp_sec_path_config_persistent, _set_lsp_sec_path_config_persistent)
lsp_sec_path_config_soft_prempt = __builtin__.property(_get_lsp_sec_path_config_soft_prempt, _set_lsp_sec_path_config_soft_prempt)
lsp_sec_path_config_priority_configured = __builtin__.property(_get_lsp_sec_path_config_priority_configured, _set_lsp_sec_path_config_priority_configured)
lsp_sec_path_config_setup_prority = __builtin__.property(_get_lsp_sec_path_config_setup_prority, _set_lsp_sec_path_config_setup_prority)
lsp_sec_path_config_holding_prority = __builtin__.property(_get_lsp_sec_path_config_holding_prority, _set_lsp_sec_path_config_holding_prority)
lsp_sec_path_config_hop_limit_configured = __builtin__.property(_get_lsp_sec_path_config_hop_limit_configured, _set_lsp_sec_path_config_hop_limit_configured)
lsp_sec_path_config_hop_limit = __builtin__.property(_get_lsp_sec_path_config_hop_limit, _set_lsp_sec_path_config_hop_limit)
lsp_sec_path_config_traffic_eng_rate_configured = __builtin__.property(_get_lsp_sec_path_config_traffic_eng_rate_configured, _set_lsp_sec_path_config_traffic_eng_rate_configured)
lsp_sec_path_config_traffic_eng_mean_rate = __builtin__.property(_get_lsp_sec_path_config_traffic_eng_mean_rate, _set_lsp_sec_path_config_traffic_eng_mean_rate)
lsp_sec_path_config_traffic_eng_max_rate = __builtin__.property(_get_lsp_sec_path_config_traffic_eng_max_rate, _set_lsp_sec_path_config_traffic_eng_max_rate)
lsp_sec_path_config_traffic_eng_max_burst = __builtin__.property(_get_lsp_sec_path_config_traffic_eng_max_burst, _set_lsp_sec_path_config_traffic_eng_max_burst)
lsp_sec_path_config_admin_group_configured = __builtin__.property(_get_lsp_sec_path_config_admin_group_configured, _set_lsp_sec_path_config_admin_group_configured)
lsp_sec_path_config_admin_groups = __builtin__.property(_get_lsp_sec_path_config_admin_groups, _set_lsp_sec_path_config_admin_groups)
_pyangbind_elements = {'lsp_sec_path_path_name': lsp_sec_path_path_name, 'lsp_sec_path_state': lsp_sec_path_state, 'lsp_sec_path_state_up': lsp_sec_path_state_up, 'lsp_sec_path_active': lsp_sec_path_active, 'lsp_sec_path_is_current_secondary': lsp_sec_path_is_current_secondary, 'lsp_sec_path_is_selected_secondary': lsp_sec_path_is_selected_secondary, 'lsp_sec_path_config_reoptimize_timer_configured': lsp_sec_path_config_reoptimize_timer_configured, 'lsp_sec_path_config_reoptimize_timer': lsp_sec_path_config_reoptimize_timer, 'lsp_sec_path_config_tspec_mtu_configured': lsp_sec_path_config_tspec_mtu_configured, 'lsp_sec_path_sec_path_config_tspec_mtu': lsp_sec_path_sec_path_config_tspec_mtu, 'lsp_sec_path_config_cos_configured': lsp_sec_path_config_cos_configured, 'lsp_sec_path_config_cos': lsp_sec_path_config_cos, 'lsp_sec_path_config_mtu_configured': lsp_sec_path_config_mtu_configured, 'lsp_sec_path_config_mtu': lsp_sec_path_config_mtu, 'lsp_sec_path_config_tie_breaking_configured': lsp_sec_path_config_tie_breaking_configured, 'lsp_sec_path_config_tie_break_random': lsp_sec_path_config_tie_break_random, 'lsp_sec_path_config_tie_break_least_fill': lsp_sec_path_config_tie_break_least_fill, 'lsp_sec_path_config_tie_break_most_fill': lsp_sec_path_config_tie_break_most_fill, 'lsp_sec_path_config_cspf_disabled': lsp_sec_path_config_cspf_disabled, 'lsp_sec_path_config_hot_standby': lsp_sec_path_config_hot_standby, 'lsp_sec_path_config_pinned': lsp_sec_path_config_pinned, 'lsp_sec_path_config_persistent': lsp_sec_path_config_persistent, 'lsp_sec_path_config_soft_prempt': lsp_sec_path_config_soft_prempt, 'lsp_sec_path_config_priority_configured': lsp_sec_path_config_priority_configured, 'lsp_sec_path_config_setup_prority': lsp_sec_path_config_setup_prority, 'lsp_sec_path_config_holding_prority': lsp_sec_path_config_holding_prority, 'lsp_sec_path_config_hop_limit_configured': lsp_sec_path_config_hop_limit_configured, 'lsp_sec_path_config_hop_limit': 
lsp_sec_path_config_hop_limit, 'lsp_sec_path_config_traffic_eng_rate_configured': lsp_sec_path_config_traffic_eng_rate_configured, 'lsp_sec_path_config_traffic_eng_mean_rate': lsp_sec_path_config_traffic_eng_mean_rate, 'lsp_sec_path_config_traffic_eng_max_rate': lsp_sec_path_config_traffic_eng_max_rate, 'lsp_sec_path_config_traffic_eng_max_burst': lsp_sec_path_config_traffic_eng_max_burst, 'lsp_sec_path_config_admin_group_configured': lsp_sec_path_config_admin_group_configured, 'lsp_sec_path_config_admin_groups': lsp_sec_path_config_admin_groups, }
| 85.048729
| 2,530
| 0.789818
|
794cb04b442342b34f9dad144cee3c047e678912
| 212,673
|
py
|
Python
|
h2o-py/h2o/frame.py
|
further2006/h2o-3
|
b0dcacaeaf0814755334214c0897a976ee151c40
|
[
"Apache-2.0"
] | 1
|
2020-10-09T02:45:59.000Z
|
2020-10-09T02:45:59.000Z
|
h2o-py/h2o/frame.py
|
further2006/h2o-3
|
b0dcacaeaf0814755334214c0897a976ee151c40
|
[
"Apache-2.0"
] | null | null | null |
h2o-py/h2o/frame.py
|
further2006/h2o-3
|
b0dcacaeaf0814755334214c0897a976ee151c40
|
[
"Apache-2.0"
] | null | null | null |
# -*- encoding: utf-8 -*-
"""
H2O data frame.
:copyright: (c) 2016 H2O.ai
:license: Apache License Version 2.0 (see LICENSE for details)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from h2o.utils.compatibility import * # NOQA
import csv
import datetime
import functools
from io import StringIO
import os
import sys
import tempfile
import traceback
from types import FunctionType
import warnings
import h2o
from h2o.base import Keyed
from h2o.display import H2ODisplay
from h2o.exceptions import H2OTypeError, H2OValueError, H2ODeprecationWarning
from h2o.expr import ExprNode
from h2o.group_by import GroupBy
from h2o.job import H2OJob
from h2o.utils.config import get_config_value
from h2o.utils.shared_utils import (_handle_numpy_array, _handle_pandas_data_frame, _handle_python_dicts,
_handle_python_lists, _is_list, _is_str_list, _py_tmp_key, _quoted,
can_use_pandas, quote, normalize_slice, slice_is_normalized, check_frame_id)
from h2o.utils.typechecks import (assert_is_type, assert_satisfies, Enum, I, is_type, numeric, numpy_ndarray,
numpy_datetime, pandas_dataframe, pandas_timestamp, scipy_sparse, U)
__all__ = ("H2OFrame", )
class H2OFrame(Keyed):
"""
Primary data store for H2O.
H2OFrame is similar to pandas' ``DataFrame``, or R's ``data.frame``. One of the critical distinctions is that the
data is generally not held in memory; instead it is located on a (possibly remote) H2O cluster, and thus
``H2OFrame`` represents a mere handle to that data.
Create a new H2OFrame object, possibly from some other object.
:param python_obj: object that will be converted to an ``H2OFrame``. This could have multiple types:
- None: create an empty H2OFrame
- A list/tuple of strings or numbers: create a single-column H2OFrame containing the contents of this list.
- A dictionary of ``{name: list}`` pairs: create an H2OFrame with multiple columns, each column having the
provided ``name`` and contents from ``list``. If the source dictionary is not an OrderedDict, then the
columns in the H2OFrame may appear shuffled.
- A list of lists of strings/numbers: construct an H2OFrame from a rectangular table of values, with inner
lists treated as rows of the table. I.e. ``H2OFrame([[1, 'a'], [2, 'b'], [3, 'c']])`` will create a
frame with 3 rows and 2 columns, one numeric and one string.
- A Pandas dataframe, or a Numpy ndarray: create a matching H2OFrame.
- A Scipy sparse matrix: create a matching sparse H2OFrame.
:param int header: if ``python_obj`` is a list of lists, this parameter can be used to indicate whether the
first row of the data represents headers. The value of -1 means the first row is data, +1 means the first
row is the headers, 0 (default) allows H2O to guess whether the first row contains data or headers.
:param List[str] column_names: explicit list of column names for the new H2OFrame. This will override any
column names derived from the data. If the python_obj does not contain explicit column names, and this
parameter is not given, then the columns will be named "C1", "C2", "C3", etc.
:param column_types: explicit column types for the new H2OFrame. This could be either a list of types for
each column, or a dictionary of {column name: column type} pairs. In the latter case you may override
types for only few columns, and let H2O choose the types of the rest.
:param na_strings: List of strings in the input data that should be interpreted as missing values. This could
be given on a per-column basis, either as a list-of-lists, or as a dictionary {column name: list of nas}.
:param str destination_frame: (internal) name of the target DKV key in the H2O backend.
:param str separator: (deprecated)
:example:
>>> python_obj = [1, 2, 2.5, -100.9, 0]
>>> frame = h2o.H2OFrame(python_obj)
>>> frame
"""
# Temp flag: set this to false for now if encountering path conversion/expansion issues when import files to remote server
__LOCAL_EXPANSION_ON_SINGLE_IMPORT__ = True
#-------------------------------------------------------------------------------------------------------------------
# Construction
#-------------------------------------------------------------------------------------------------------------------
    def __init__(self, python_obj=None, destination_frame=None, header=0, separator=",",
                 column_names=None, column_types=None, na_strings=None, skipped_columns=None):
        """Create a new H2OFrame, optionally uploading ``python_obj`` (see class docstring for parameters)."""
        # Union of all acceptable per-column type strings.
        coltype = U(None, "unknown", "uuid", "string", "float", "real", "double", "int", "long", "numeric",
                    "categorical", "factor", "enum", "time")
        # Validate all arguments up-front before touching the backend.
        assert_is_type(python_obj, None, list, tuple, dict, numpy_ndarray, pandas_dataframe, scipy_sparse)
        assert_is_type(destination_frame, None, str)
        assert_is_type(header, -1, 0, 1)
        # Separator must be a single character.
        assert_is_type(separator, I(str, lambda s: len(s) == 1))
        assert_is_type(column_names, None, [str])
        # Column types: either one per column, or a {name: type} override map.
        assert_is_type(column_types, None, [coltype], {str: coltype})
        assert_is_type(na_strings, None, [str], [[str]], {str: [str]})
        check_frame_id(destination_frame)
        # Start with an empty expression node; children=None marks a leaf.
        self._ex = ExprNode()
        self._ex._children = None
        self._is_frame = True  # Indicate that this is an actual frame, allowing typechecks to be made
        if python_obj is not None:
            self._upload_python_object(python_obj, destination_frame, header, separator,
                                       column_names, column_types, na_strings, skipped_columns)
@staticmethod
def _expr(expr, cache=None):
# TODO: merge this method with `__init__`
fr = H2OFrame()
fr._ex = expr
if cache is not None:
fr._ex._cache.fill_from(cache)
return fr
def _upload_python_object(self, python_obj, destination_frame=None, header=0, separator=",",
column_names=None, column_types=None, na_strings=None, skipped_columns=None):
assert_is_type(python_obj, list, tuple, dict, numpy_ndarray, pandas_dataframe, scipy_sparse)
if is_type(python_obj, scipy_sparse):
self._upload_sparse_matrix(python_obj, destination_frame=destination_frame)
return
# TODO: all these _handlers should really belong to this class, not to shared_utils.
processor = (_handle_pandas_data_frame if is_type(python_obj, pandas_dataframe) else
_handle_numpy_array if is_type(python_obj, numpy_ndarray) else
_handle_python_dicts if is_type(python_obj, dict) else
_handle_python_lists)
col_header, data_to_write = processor(python_obj, header)
if col_header is None or data_to_write is None:
raise H2OValueError("No data to write")
if not column_names:
column_names = col_header
# create a temporary file that will be written to
tmp_handle, tmp_path = tempfile.mkstemp(suffix=".csv")
tmp_file = os.fdopen(tmp_handle, 'w')
# create a new csv writer object thingy
csv_writer = csv.writer(tmp_file, dialect="excel", quoting=csv.QUOTE_NONNUMERIC)
csv_writer.writerow(column_names)
if data_to_write and isinstance(data_to_write[0], dict):
for row in data_to_write:
csv_writer.writerow([row.get(k, None) for k in col_header])
else:
csv_writer.writerows(data_to_write)
tmp_file.close() # close the streams
self._upload_parse(tmp_path, destination_frame, 1, separator, column_names, column_types, na_strings, skipped_columns)
os.remove(tmp_path) # delete the tmp file
def _upload_sparse_matrix(self, matrix, destination_frame=None):
import scipy.sparse as sp
if not sp.issparse(matrix):
raise H2OValueError("A sparse matrix expected, got %s" % type(matrix))
tmp_handle, tmp_path = tempfile.mkstemp(suffix=".svmlight")
out = os.fdopen(tmp_handle, "wt")
if destination_frame is None:
destination_frame = _py_tmp_key(h2o.connection().session_id)
# sp.find(matrix) returns (row indices, column indices, values) of the non-zero elements of A. Unfortunately
# there is no guarantee that those elements are returned in the correct order, so need to sort
data = zip(*sp.find(matrix))
if not isinstance(data, list): data = list(data) # possibly convert from iterator to a list
data.sort()
idata = 0 # index of the next element to be consumed from `data`
for irow in range(matrix.shape[0]):
if idata < len(data) and data[idata][0] == irow and data[idata][1] == 0:
y = data[idata][2]
idata += 1
else:
y = 0
out.write(str(y))
while idata < len(data) and data[idata][0] == irow:
out.write(" ")
out.write(str(data[idata][1]))
out.write(":")
out.write(str(data[idata][2]))
idata += 1
out.write("\n")
out.close()
ret = h2o.api("POST /3/PostFile", filename=tmp_path)
os.remove(tmp_path)
rawkey = ret["destination_frame"]
p = {"source_frames": [rawkey], "destination_frame": destination_frame}
H2OJob(h2o.api("POST /3/ParseSVMLight", data=p), "Parse").poll()
self._ex._cache._id = destination_frame
self._ex._cache.fill()
@staticmethod
def get_frame(frame_id, rows=10, rows_offset=0, cols=-1, full_cols=-1, cols_offset=0, light=False):
"""
Retrieve an existing H2OFrame from the H2O cluster using the frame's id.
:param str frame_id: id of the frame to retrieve
:param int rows: number of rows to fetch for preview (10 by default)
:param int rows_offset: offset to fetch rows from (0 by default)
:param int cols: number of columns to fetch (all by default)
:param full_cols: number of columns to fetch together with backed data
:param int cols_offset: offset to fetch rows from (0 by default)
:param bool light: whether to use light frame endpoint or not
:returns: an existing H2OFrame with the id provided; or None if such frame doesn't exist.
:examples:
>>> iris = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv")
>>> h2o.get_frame(iris.frame_id)
"""
fr = H2OFrame()
fr._ex._cache._id = frame_id
try:
fr._ex._cache.fill(rows=rows, rows_offset=rows_offset, cols=cols, full_cols=full_cols, cols_offset=cols_offset, light=light)
except EnvironmentError:
return None
return fr
@staticmethod
def _validate(param, name, required=False, message=None):
message = message or "'{}' must be a valid H2OFrame!".format(name)
if param is None:
if required:
raise ValueError(message)
else:
return
else:
assert_is_type(param, H2OFrame, message=message)
return param
def refresh(self):
"""
Reload frame information from the backend H2O server.
:returns: Frame information from the backend H2O server.
:examples:
>>> dataframe = {'A': [1,0,3,4],
... 'B': [5,6,-6, -1],
... 'C':[-4, -6, -7, 8]}
>>> frame = h2o.H2OFrame(dataframe)
>>> frame_asin = frame.asin()
>>> assert set(frame.names) ==
... {"A", "B", "C"},
... "Expected original colnames to remain the same after uniop operation"
>>> assert ["asin(%s)" % (name) for name in frame.names] ==
... frame_asin.names,"Expected equal col names after",
... " uniop operation"
>>> frame_asin.refresh()
"""
self._ex._cache.flush()
self._frame(fill_cache=True)
#-------------------------------------------------------------------------------------------------------------------
# Frame properties
#-------------------------------------------------------------------------------------------------------------------
@property
def key(self):
"""
Displays the unique key representing the object on the backend.
:returns: the unique key representing the object on the backend
:examples:
>>> frame = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris.csv")
>>> frame.key
"""
return None if self._ex is None else self._ex._cache._id
@property
def names(self):
"""
The list of column names (List[str]).
:returns: The list of column names.
:examples:
>>> iris = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader_NA_2.csv")
>>> iris.names
"""
if not self._ex._cache.names_valid():
self._ex._cache.flush()
self._frame(fill_cache=True)
return list(self._ex._cache.names)
    @names.setter
    def names(self, value):
        # Delegate to set_names(), which validates the list and renames columns
        # on the backend.
        self.set_names(value)
    @property
    def nrows(self):
        """
        Number of rows in the dataframe (int).
        :returns: Number of rows in the dataframe.
        :examples:
        >>> iris = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader_NA_2.csv")
        >>> iris.nrows
        """
        if not self._ex._cache.nrows_valid():
            # Cached row count is stale: drop it and refetch frame metadata.
            self._ex._cache.flush()
            self._frame(fill_cache=True)
        return self._ex._cache.nrows
@property
def ncols(self):
"""
Number of columns in the dataframe (int).
:returns: Number of columns in the dataframe.
:examples:
>>> iris = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader_NA_2.csv")
>>> iris.ncols
"""
if not self._ex._cache.ncols_valid():
self._ex._cache.flush()
self._frame(fill_cache=True)
return self._ex._cache.ncols
@property
def shape(self):
"""
Number of rows and columns in the dataframe as a tuple ``(nrows, ncols)``.
:returns: Number of rows and columns in the dataframe as a tuple
:examples:
>>> iris = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv")
>>> iris = iris[:, 0:4]
>>> iris.shape
"""
return self.nrows, self.ncols
@property
def types(self):
"""
The dictionary of column name/type pairs.
:returns: Dictionary of column name/type pairs.
:examples:
>>> iris = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris.csv")
>>> iris.types
"""
if not self._ex._cache.types_valid():
self._ex._cache.flush()
self._frame(fill_cache=True)
return dict(self._ex._cache.types)
@property
def frame_id(self):
"""
Internal id of the frame (str).
:returns: Internal id of the frame (str).
:examples:
>>> iris = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv")
>>> print(iris.frame_id)
"""
return self._frame()._ex._cache._id
@frame_id.setter
def frame_id(self, newid):
check_frame_id(newid)
if self._ex._cache._id is None:
h2o.assign(self, newid)
else:
oldname = self.frame_id
self._ex._cache._id = newid
h2o.rapids("(rename \"{}\" \"{}\")".format(oldname, newid))
    def type(self, col):
        """
        The type for the given column.
        :param col: either a name, or an index of the column to look up
        :returns: type of the column, one of: ``str``, ``int``, ``real``, ``enum``, ``time``, ``bool``.
        :raises H2OValueError: if such column does not exist in the frame.
        """
        assert_is_type(col, int, str)
        if not self._ex._cache.types_valid() or not self._ex._cache.names_valid():
            # Cached metadata is stale: drop it and refetch.
            self._ex._cache.flush()
            self._frame(fill_cache=True)
        types = self._ex._cache.types
        if is_type(col, str):
            # Lookup by column name.
            if col in types:
                return types[col]
        else:
            # Lookup by index; negative indices count from the end.
            names = self._ex._cache.names
            if -len(names) <= col < len(names):
                return types[names[col]]
        # Neither branch matched: the column does not exist.
        raise H2OValueError("Column '%r' does not exist in the frame" % col)
    def _import_parse(self, path, pattern, destination_frame, header, separator, column_names, column_types, na_strings,
                      skipped_columns=None, custom_non_data_line_markers=None, partition_by=None):
        """
        Lazily import ``path`` on the server side (matching ``pattern``) and
        parse the raw key into this frame.
        :returns: self, for chaining.
        """
        # Expand local relative paths before import; only for plain local paths
        # (no URI scheme) and only while the temp flag is on.
        if H2OFrame.__LOCAL_EXPANSION_ON_SINGLE_IMPORT__ and is_type(path, str) and "://" not in path:  # fixme: delete those 2 lines, cf. PUBDEV-5717
            path = os.path.abspath(path)
        rawkey = h2o.lazy_import(path, pattern)
        self._parse(rawkey, destination_frame, header, separator, column_names, column_types, na_strings,
                    skipped_columns, custom_non_data_line_markers, partition_by)
        return self
def _upload_parse(self, path, destination_frame, header, sep, column_names, column_types, na_strings, skipped_columns=None):
ret = h2o.api("POST /3/PostFile", filename=path)
rawkey = ret["destination_frame"]
self._parse(rawkey, destination_frame, header, sep, column_names, column_types, na_strings, skipped_columns)
return self
def _parse(self, rawkey, destination_frame="", header=None, separator=None, column_names=None, column_types=None,
na_strings=None, skipped_columns=None, custom_non_data_line_markers=None, partition_by=None):
setup = h2o.parse_setup(rawkey, destination_frame, header, separator, column_names, column_types, na_strings,
skipped_columns, custom_non_data_line_markers, partition_by)
return self._parse_raw(setup)
    def _parse_raw(self, setup):
        """
        Issue the actual POST /3/Parse request from a parse-setup response and
        point this frame's cache at the resulting destination frame.
        :param setup: dict returned by ``h2o.parse_setup`` describing how to parse.
        """
        # Parse parameters (None values provided by setup)
        p = {"destination_frame": None,
             "parse_type": None,
             "separator": None,
             "single_quotes": None,
             "check_header": None,
             "number_columns": None,
             "chunk_size": None,
             "delete_on_done": True,
             "blocking": False,
             "column_types": None,
             "skipped_columns":None,
             "custom_non_data_line_markers": setup["custom_non_data_line_markers"],
             "partition_by": setup["partition_by"]
             }
        # Register the keys as placeholders so the update below copies them in
        # from `setup` only when setup actually provides them.
        if setup["column_names"]: p["column_names"] = None
        if setup["na_strings"]: p["na_strings"] = None
        p.update({k: v for k, v in viewitems(setup) if k in p})
        # Extract only 'name' from each src in the array of srcs
        p['source_frames'] = [_quoted(src['name']) for src in setup['source_frames']]
        H2OJob(h2o.api("POST /3/Parse", data=p), "Parse").poll()
        # Need to return a Frame here for nearly all callers
        # ... but job stats returns only a dest_key, requiring another REST call to get nrow/ncol
        self._ex._cache._id = p["destination_frame"]
        self._ex._cache.fill()
def filter_na_cols(self, frac=0.2):
    """
    Filter columns with a proportion of NAs greater than or equal to ``frac``.

    :param float frac: Maximum fraction of NAs in the column to keep.

    :returns: A list of indices of columns that have fewer NAs than ``frac``. If all columns are filtered,
        None is returned.

    :examples:

    >>> prostate = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/prostate/prostate.csv.zip")
    >>> include_cols1 = prostate.filter_na_cols()
    >>> include_cols1
    >>> include_cols2 = prostate.filter_na_cols(0.001)
    >>> include_cols2
    """
    # Evaluated eagerly server-side; returns the surviving column indices.
    return ExprNode("filterNACols", self, frac)._eager_scalar()
def columns_by_type(self, coltype="numeric"):
    """
    Extract columns of the specified type from the frame.

    :param str coltype: A character string indicating which column type to filter by. This must be
        one of the following:

        - ``"numeric"``      - Numeric, but not categorical or time
        - ``"categorical"``  - Integer, with a categorical/factor String mapping
        - ``"string"``       - String column
        - ``"time"``         - Long msec since the Unix Epoch - with a variety of display/parse options
        - ``"uuid"``         - UUID
        - ``"bad"``          - No non-NA rows (triple negative! all NAs or zero rows)

    :returns: list of indices of columns that have the requested type

    :examples:

    >>> frame = h2o.create_frame(rows=10,
    ...                          integer_fraction=1,
    ...                          binary_ones_fraction=0,
    ...                          missing_fraction=0)
    >>> num = frame.columns_by_type(coltype="numeric")
    >>> strings = frame.columns_by_type(coltype="string")
    >>> num
    >>> strings
    """
    assert_is_type(coltype, "numeric", "categorical", "string", "time", "uuid", "bad")
    assert_is_type(self, H2OFrame)
    return ExprNode("columnsByType", self, coltype)._eager_scalar()
def __iter__(self):
    """Iterate over the frame column by column, yielding each column as a 1-column H2OFrame."""
    column_indices = range(self.ncol)
    return (self[index] for index in column_indices)
def __unicode__(self):
    """
    Return a tabulated text preview of the frame plus its dimensions.

    Rendering is skipped (empty string returned) when a tracer/debugger is active,
    to avoid triggering REST calls while stepping through code.
    """
    if sys.gettrace() is None:
        if self._ex is None: return "This H2OFrame has been removed."
        # Force the cache to be populated before tabulating.
        table = self._frame(fill_cache=True)._ex._cache._tabulate("simple", False)
        nrows = "%d %s" % (self.nrow, "row" if self.nrow == 1 else "rows")
        ncols = "%d %s" % (self.ncol, "column" if self.ncol == 1 else "columns")
        return "%s\n\n[%s x %s]" % (table, nrows, ncols)
    return ""
def __repr__(self):
    """
    Print the frame via :meth:`show` as a side effect and return an empty string.

    Skipped entirely when a tracer/debugger is active (``sys.gettrace()`` not None).
    """
    if sys.gettrace() is None:
        # PUBDEV-2278: using <method>? from IPython caused everything to dump
        stk = traceback.extract_stack()
        if not ("IPython" in stk[-2][0] and "info" == stk[-2][2]):
            self.show()
    return ""
def _has_content(self):
    """Truthy when the frame is backed by either an unevaluated expression or a cached server-side id."""
    expr = self._ex
    return expr and (expr._children or expr._cache._id)
def show(self, use_pandas=False, rows=10, cols=200):
    """
    Used by the H2OFrame.__repr__ method to print or display a snippet of the data frame.

    If called from IPython, displays the results in HTML format. Otherwise, this prints a tabulated result.

    :param bool use_pandas: render via a pandas DataFrame when pandas is available.
    :param int rows: maximum number of rows to display.
    :param int cols: maximum number of columns to display.
    :returns: snippet of the data frame.

    :examples:

    >>> from random import randrange
    >>> import numpy as np
    >>> row_num = randrange(1,10)
    >>> col_num = randrange(1,10)
    >>> python_lists = np.random.randint(-5,5, (row_num,col_num))
    >>> h2oframe = h2o.H2OFrame(python_obj=python_lists)
    >>> h2oframe.show(use_pandas=False)
    >>> h2oframe.show(use_pandas=True)
    """
    # Guard against detached / uninitialized / empty frames before rendering.
    if self._ex is None:
        print("This H2OFrame has been removed.")
        return
    if not self._has_content():
        print("This H2OFrame is empty and not initialized.")
        return
    if self.nrows == 0:
        print("This H2OFrame is empty.")
        return
    if not self._ex._cache.is_valid(): self._frame()._ex._cache.fill()
    if H2ODisplay._in_zep():
        # Zeppelin renders HTML when the output starts with "%html".
        print("%html " + self._ex._cache._tabulate("html", False, rows=rows))
    elif H2ODisplay._in_ipy():
        import IPython.display
        if use_pandas and can_use_pandas():
            IPython.display.display(self.head(rows=rows, cols=cols).as_data_frame(use_pandas=True))
        else:
            IPython.display.display_html(self._ex._cache._tabulate("html", False, rows=rows), raw=True)
    else:
        if use_pandas and can_use_pandas():
            print(self.head(rows=rows, cols=cols).as_data_frame(use_pandas=True))
        else:
            s = self.__unicode__()
            stk = traceback.extract_stack()
            # When invoked from IPython, prepend a newline so the table is not glued to the prompt.
            if "IPython" in stk[-3][0]:
                s = "\n%s" % s
            try:
                print(s)
            except UnicodeEncodeError:
                # Terminal cannot encode the table's characters; degrade to ASCII.
                print(s.encode("ascii", "replace"))
def summary(self, return_data=False):
    """
    Display summary information about the frame.

    Summary includes min/mean/max/sigma and other rollup data.

    :param bool return_data: Return a dictionary of the summary output
    :returns: Summary of information about the frame

    :examples:

    >>> frame = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris.csv")
    >>> frame.summary()
    """
    if not self._has_content():
        print("This H2OFrame is empty and not initialized.")
        return self._ex._cache._data  # removed stray trailing semicolon
    if not self._ex._cache.is_valid(): self._frame()._ex._cache.fill()
    if not return_data:
        if self.nrows == 0:
            print("This H2OFrame is empty.")
        elif H2ODisplay._in_zep():
            # Zeppelin renders HTML when the output starts with "%html".
            print("%html " + self._ex._cache._tabulate("html", True))
        elif H2ODisplay._in_ipy():
            import IPython.display
            IPython.display.display_html(self._ex._cache._tabulate("html", True), raw=True)
        else:
            print(self._ex._cache._tabulate("simple", True))
    else:
        return self._ex._cache._data
def describe(self, chunk_summary=False):
    """
    Generate an in-depth description of this H2OFrame.

    This will print to the console the dimensions of the frame; names/types/summary statistics for each column;
    and finally first ten rows of the frame.

    :param bool chunk_summary: Retrieve the chunk summary along with the distribution summary
    :returns: The dimensions of the frame; names/types/summary statistics for each column; first ten rows of the frame.

    :examples:

    >>> python_lists = [[1,2,3],[4,5,6],["a","b","c"],[1,0,1]]
    >>> col_names=["num1","num2","str1","enum1"]
    >>> dest_frame="newFrame"
    >>> heads=-1
    >>> sep=','
    >>> col_types=['numeric','numeric','string','enum']
    >>> na_str=['NA']
    >>> h2oframe = h2o.H2OFrame(python_obj=python_lists,
    ...                         destination_frame=dest_frame,
    ...                         header=heads,
    ...                         separator=sep,
    ...                         column_names=col_names,
    ...                         column_types=col_types,
    ...                         na_strings=na_str)
    >>> h2oframe.describe(chunk_summary=True)
    """
    if self._has_content():
        # Refresh the cache with a fresh 10-row snapshot from the server.
        res = h2o.api("GET /3/Frames/%s" % self.frame_id, data={"row_count": 10})["frames"][0]
        self._ex._cache._fill_data(res)
    print("Rows:{}".format(self.nrow))
    print("Cols:{}".format(self.ncol))
    # The chunk & distribution summaries are not cached, so must be pulled if chunk_summary=True.
    if chunk_summary:
        res["chunk_summary"].show()
        res["distribution_summary"].show()
    print("\n")
    self.summary()
def detach(self):
    """
    Detach the Python object from the backend, usually by clearing its key

    :returns: Removed H2OFrame

    :examples:

    >>> from random import randrange
    >>> import numpy as np
    >>> row_num = randrange(2,10)
    >>> col_num = randrange(2,10)
    >>> python_lists = np.random.randint(-5,5, (row_num, col_num))
    >>> h2oframe = h2o.H2OFrame(python_obj=python_lists)
    >>> h2oframe.detach()
    >>> h2oframe
    """
    # Dropping the expression makes the Python object inert; the server-side
    # frame itself is not deleted here.
    self._ex = None
def _frame(self, rows=10, rows_offset=0, cols=-1, cols_offset=0, fill_cache=False):
    """
    Force eager evaluation of this frame's expression, optionally refreshing the
    preview cache for the given row/column window.

    :returns: this H2OFrame (fluent, ``self``).
    """
    self._ex._eager_frame()
    if fill_cache:
        self._ex._cache.fill(rows=rows, rows_offset=rows_offset, cols=cols, cols_offset=cols_offset)
    return self
def head(self, rows=10, cols=200):
    """
    Return the first ``rows`` and ``cols`` of the frame as a new H2OFrame.

    :param int rows: maximum number of rows to return
    :param int cols: maximum number of columns to return

    :returns: a new H2OFrame cut from the top left corner of the current frame, and having dimensions at
        most ``rows`` x ``cols``.

    :examples:

    >>> import numpy as np
    >>> from h2o.frame import H2OFrame
    >>> iris = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv")
    >>> df = H2OFrame.from_python(np.random.randn(100, 4).tolist(),
    ...                           column_names=list("ABCD"),
    ...                           column_types=["enum"] * 4)
    >>> df.head()
    """
    assert_is_type(rows, int)
    assert_is_type(cols, int)
    # Never request more rows/columns than the frame actually has.
    row_limit = min(self.nrows, rows)
    col_limit = min(self.ncols, cols)
    corner = self[:row_limit, :col_limit]
    return corner._frame(rows=row_limit, cols=cols, fill_cache=True)
def tail(self, rows=10, cols=200):
    """
    Return the last ``rows`` and ``cols`` of the frame as a new H2OFrame.

    :param int rows: maximum number of rows to return
    :param int cols: maximum number of columns to return

    :returns: a new H2OFrame cut from the bottom left corner of the current frame, and having dimensions at
        most ``rows`` x ``cols``.

    :examples:

    >>> from random import randrange
    >>> import numpy as np
    >>> row_num = randrange(2,10)
    >>> col_num = randrange(2,10)
    >>> python_lists = np.random.randint(-5,5, (row_num, col_num))
    >>> h2oframe = h2o.H2OFrame(python_obj=python_lists)
    >>> h2oframe
    >>> new_row = randrange(1, row_num)
    >>> new_col = randrange(1, col_num)
    >>> h2oframe.tail(rows=new_row, cols=new_col)
    """
    assert_is_type(rows, int)
    assert_is_type(cols, int)
    # Never request more rows/columns than the frame actually has.
    row_limit = min(self.nrows, rows)
    col_limit = min(self.ncols, cols)
    first_row = self.nrows - row_limit
    corner = self[first_row:first_row + row_limit, :col_limit]
    return corner._frame(rows=row_limit, cols=cols, fill_cache=True)
def logical_negation(self):
    """
    Create a new H2OFrame equal to elementwise Logical NOT applied to the current frame.

    :returns: New H2OFrame equal to elementwise Logical NOT applied to the current frame.

    :examples:

    >>> python_obj = [1, 2, 2.5, -100.9, 0]
    >>> frame = h2o.H2OFrame(python_obj)
    >>> frame.logical_negation()
    """
    # The cached metadata is reused because NOT preserves the frame's shape.
    return H2OFrame._expr(expr=ExprNode("not", self), cache=self._ex._cache)
def _unop(self, op, rtype="real"):
    """
    Apply the elementwise unary operator ``op`` to every column of the frame.

    Raises H2OValueError when any column is not of a numeric/boolean type; the
    result's column names are rewritten to ``op(name)`` and typed as ``rtype``.
    """
    if self._is_frame:
        for colname, coltype in self.types.items():
            if coltype in ("int", "real", "bool"):
                continue
            raise H2OValueError("Function %s cannot be applied to %s column '%s'" % (op, coltype, colname))
    result = H2OFrame._expr(expr=ExprNode(op, self), cache=self._ex._cache)
    result._ex._cache._names = ["%s(%s)" % (op, name) for name in self._ex._cache._names]
    result._ex._cache._types = {name: rtype for name in result._ex._cache._names}
    return result
# Binary operations
def __add__(self, rhs):
    """Elementwise addition (``self + rhs``)."""
    return _binop(self, "+", rhs)
def __sub__(self, rhs):
    """Elementwise subtraction (``self - rhs``)."""
    return _binop(self, "-", rhs)
def __mul__(self, rhs):
    """Elementwise multiplication (``self * rhs``)."""
    return _binop(self, "*", rhs)
def __div__(self, rhs):
    """Elementwise division (``self / rhs``) — Python 2 protocol."""
    return _binop(self, "/", rhs)
def __truediv__(self, rhs):
    """Elementwise true division (``self / rhs``) — Python 3 protocol."""
    return _binop(self, "/", rhs)
def __floordiv__(self, rhs):
    """Elementwise integer (floor) division (``self // rhs``)."""
    return _binop(self, "intDiv", rhs)
def __mod__(self, rhs):
    """Elementwise modulo (``self % rhs``)."""
    return _binop(self, "%", rhs)
def __or__(self, rhs):
    """Elementwise logical OR (``self | rhs``); result columns are bool."""
    return _binop(self, "|", rhs, rtype="bool")
def __and__(self, rhs):
    """Elementwise logical AND (``self & rhs``); result columns are bool."""
    return _binop(self, "&", rhs, rtype="bool")
def __ge__(self, rhs):
    """Elementwise comparison ``self >= rhs``; result columns are bool."""
    return _binop(self, ">=", rhs, rtype="bool")
def __gt__(self, rhs):
    """Elementwise comparison ``self > rhs``; result columns are bool."""
    return _binop(self, ">", rhs, rtype="bool")
def __le__(self, rhs):
    """Elementwise comparison ``self <= rhs``; result columns are bool."""
    return _binop(self, "<=", rhs, rtype="bool")
def __lt__(self, rhs):
    """Elementwise comparison ``self < rhs``; result columns are bool."""
    return _binop(self, "<", rhs, rtype="bool")
def __eq__(self, rhs):
    """Elementwise equality ``self == rhs``; ``None`` is compared as NaN. Result columns are bool."""
    if rhs is None: rhs = float("nan")
    return _binop(self, "==", rhs, rtype="bool")
def __ne__(self, rhs):
    """Elementwise inequality ``self != rhs``; ``None`` is compared as NaN. Result columns are bool."""
    if rhs is None: rhs = float("nan")
    return _binop(self, "!=", rhs, rtype="bool")
def __pow__(self, rhs):
    """Elementwise exponentiation (``self ** rhs``)."""
    return _binop(self, "^", rhs)
def __contains__(self, lhs):
    """Membership test: True when ``lhs`` (a scalar, or every element of a list) occurs somewhere in the frame."""
    return all((t == self).any() for t in lhs) if _is_list(lhs) else (lhs == self).any()
# rops
def __rmod__(self, lhs):
    """Reflected modulo (``lhs % self``)."""
    return _binop(lhs, "%", self)
def __radd__(self, lhs):
    """Reflected addition (``lhs + self``)."""
    return _binop(lhs, "+", self)
def __rsub__(self, lhs):
    """Reflected subtraction (``lhs - self``)."""
    return _binop(lhs, "-", self)
def __rand__(self, lhs):
    """Reflected logical AND (``lhs & self``); result columns are bool."""
    return _binop(lhs, "&", self, rtype="bool")
def __ror__(self, lhs):
    """Reflected logical OR (``lhs | self``); result columns are bool."""
    return _binop(lhs, "|", self, rtype="bool")
def __rtruediv__(self, lhs):
    """Reflected true division (``lhs / self``) — Python 3 protocol."""
    return _binop(lhs, "/", self)
def __rdiv__(self, lhs):
    """Reflected division (``lhs / self``) — Python 2 protocol."""
    return _binop(lhs, "/", self)
def __rfloordiv__(self, lhs):
    """Reflected integer (floor) division (``lhs // self``); result columns are int."""
    return _binop(lhs, "intDiv", self, rtype="int")
def __rmul__(self, lhs):
    """Reflected multiplication (``lhs * self``)."""
    return _binop(lhs, "*", self)
def __rpow__(self, lhs):
    """Reflected exponentiation (``lhs ** self``)."""
    return _binop(lhs, "^", self)
# unops
def __abs__(self):
    """Elementwise absolute value (``abs(self)``)."""
    return self._unop("abs")
def __invert__(self):
    """Elementwise logical NOT (``~self``); result columns are bool."""
    return self._unop("!!", rtype="bool")
def __nonzero__(self):
    """
    Truth-value protocol (Python 2 name; presumably aliased to ``__bool__`` for
    Python 3 elsewhere in the class — TODO confirm).

    Only defined for 1x1 frames; larger frames raise to steer users towards the
    elementwise logical operators instead.
    """
    if self.nrows > 1 or self.ncols > 1:
        raise H2OValueError(
            'This operation is not supported on an H2OFrame. Try using parentheses. '
            'Did you mean & (logical and), | (logical or), or ~ (logical not)?')
    else:
        return self.__len__()
def __int__(self):
    """Convert a 1x1 frame to an int (via :meth:`flatten`)."""
    return int(self.flatten())
def __float__(self):
    """Convert a 1x1 frame to a float (via :meth:`flatten`)."""
    return float(self.flatten())
def flatten(self):
    """
    Convert a 1x1 frame into a scalar.

    :returns: content of this 1x1 frame as a scalar (``int``, ``float``, or ``str``).
    :raises H2OValueError: if current frame has shape other than 1x1

    :examples:

    >>> python_obj = [1, 2, 2.5, -100.9, 0]
    >>> frame1 = h2o.H2OFrame(python_obj)
    >>> frame1.flatten()
    # Should receive "H2OValueError: Not a 1x1 Frame"
    >>> frame2 = h2o.H2OFrame.from_python(["redrum"])
    >>> frame2.flatten()
    """
    if self.shape != (1, 1): raise H2OValueError("Not a 1x1 Frame")
    return ExprNode("flatten", self)._eager_scalar()
def getrow(self):
    """
    Convert a 1xn frame into an n-element list.

    :returns: content of this 1xn frame as a Python list.
    :raises H2OValueError: if current frame has more than one row.

    :examples:

    >>> import scipy.sparse as sp
    >>> A = sp.csr_matrix([[1, 2, 0, 5.5], [0, 0, 3, 6.7], [4, 0, 5, 0]])
    >>> fr = h2o.H2OFrame(A)
    >>> assert fr.shape == (3, 4)
    >>> assert fr.as_data_frame(False) ==
    ...     [['C1', 'C2', 'C3', 'C4'], ['1', '2', '0', '5.5'],
    ...     ['0', '0', '3', '6.7'], ['4', '0', '5', '0.0']]
    >>> A = sp.lil_matrix((1000, 1000))
    >>> A.setdiag(10)
    >>> for i in range(999):
    ...     A[i, i + 1] = -3
    ...     A[i + 1, i] = -2
    >>> fr = h2o.H2OFrame(A)
    >>> assert fr.shape == (1000, 1000)
    >>> means = fr.mean().getrow()
    >>> assert means == [0.008] + [0.005] * 998 + [0.007]
    >>> means
    """
    if self.nrows != 1:
        raise H2OValueError("This method can only be applied to single-row frames")
    return ExprNode("getrow", self)._eager_scalar()
def mult(self, matrix):
    """
    Multiply this frame, viewed as a matrix, by another matrix.

    :param matrix: another frame that you want to multiply the current frame by; must be compatible with the
        current frame (i.e. its number of rows must be the same as number of columns in the current frame).
    :returns: new H2OFrame, which is the result of multiplying the current frame by ``matrix``.
    :raises H2OValueError: if the shapes are incompatible for matrix multiplication.

    :examples:

    >>> data = [[random.uniform(-10000,10000)] for c in range(100)]
    >>> h2o_data = h2o.H2OFrame(data)
    >>> h2o_mm = h2o_data.mult(h2o_data.transpose())
    """
    # Matrix product requires (m x k) . (k x n): our ncols must equal their nrows.
    if self.ncols != matrix.nrows:
        raise H2OValueError("Matrix is not compatible for multiplication with the current frame")
    return H2OFrame._expr(expr=ExprNode("x", self, matrix))
def cos(self):
    """
    Create a new H2OFrame equal to elementwise cosine of the current frame.

    :returns: New H2OFrame equal to elementwise cosine of the current frame.

    :examples:

    >>> python_obj = [1, 2, 2.5, -100.9, 0]
    >>> frame = h2o.H2OFrame(python_obj)
    >>> frame.cos()
    """
    return self._unop("cos")  # generic elementwise unary op
def sin(self):
    """
    Create a new H2OFrame equal to elementwise sine of the current frame.

    :returns: New H2OFrame equal to elementwise sine of the current frame.

    :examples:

    >>> python_obj = [1, 2, 2.5, -100.9, 0]
    >>> frame = h2o.H2OFrame(python_obj)
    >>> frame.sin()
    """
    return self._unop("sin")  # generic elementwise unary op
def tan(self):
    """
    Create a new H2OFrame equal to elementwise tangent of the current frame.

    :returns: New H2OFrame equal to elementwise tangent of the current frame.

    :examples:

    >>> python_obj = [1, 2, 2.5, -100.9, 0]
    >>> frame = h2o.H2OFrame(python_obj)
    >>> frame.tan()
    """
    return self._unop("tan")  # generic elementwise unary op
def acos(self):
    """
    Create a new H2OFrame equal to elementwise arc cosine of the current frame.

    :returns: New H2OFrame equal to elementwise arc cosine of the current frame.

    :examples:

    >>> python_obj = [1, 2, 2.5, -100.9, 0]
    >>> frame = h2o.H2OFrame(python_obj)
    >>> frame.acos()
    """
    return self._unop("acos")  # generic elementwise unary op
def asin(self):
    """
    Create a new H2OFrame equal to elementwise arc sine of the current frame.

    :returns: New H2OFrame equal to elementwise arc sine of the current frame.

    :examples:

    >>> python_obj = [1, 2, 2.5, -100.9, 0]
    >>> frame = h2o.H2OFrame(python_obj)
    >>> frame.asin()
    """
    return self._unop("asin")  # generic elementwise unary op
def atan(self):
    """
    Create a new H2OFrame equal to elementwise arc tangent of the current frame.

    :returns: New H2OFrame equal to elementwise arc tangent of the current frame.

    :examples:

    >>> python_obj = [1,2,2.5,-100.9,0]
    >>> frame = h2o.H2OFrame(python_obj)
    >>> frame.atan()
    """
    return self._unop("atan")  # generic elementwise unary op
def cosh(self):
    """
    Create a new H2OFrame with values equal to the hyperbolic cosines of the values in the current frame.

    :returns: New H2OFrame with values equal to the hyperbolic cosines of the values in the current frame.

    :examples:

    >>> python_obj = [1,2,2.5,-100.9,0]
    >>> frame = h2o.H2OFrame(python_obj)
    >>> frame.cosh()
    """
    return self._unop("cosh")  # generic elementwise unary op
def sinh(self):
    """
    Create a new H2OFrame equal to elementwise hyperbolic sine of the current frame.

    :returns: New H2OFrame equal to elementwise hyperbolic sine of the current frame.

    :examples:

    >>> python_obj = [1,2,2.5,-100.9,0]
    >>> frame = h2o.H2OFrame(python_obj)
    >>> frame.sinh()
    """
    return self._unop("sinh")  # generic elementwise unary op
def tanh(self):
    """
    Create a new H2OFrame equal to elementwise hyperbolic tangent of the current frame.

    :returns: New H2OFrame equal to elementwise hyperbolic tangent of the current frame.

    :examples:

    >>> python_obj = [1, 2, 2.5, -100.9, 0]
    >>> frame = h2o.H2OFrame(python_obj)
    >>> frame.tanh()
    """
    return self._unop("tanh")  # generic elementwise unary op
def acosh(self):
    """
    Create a new H2OFrame equal to elementwise inverse hyperbolic cosine of the current frame.

    :returns: New H2OFrame equal to elementwise inverse hyperbolic cosine of the current frame.

    :examples:

    >>> python_obj = [1, 2, 2.5, -100.9, 0]
    >>> frame = h2o.H2OFrame(python_obj)
    >>> frame.acosh()
    """
    return self._unop("acosh")  # generic elementwise unary op
def asinh(self):
    """
    Create a new H2OFrame equal to elementwise inverse hyperbolic sine of the current frame.

    :returns: New H2OFrame equal to elementwise inverse hyperbolic sine of the current frame.

    :examples:

    >>> python_obj = [1, 2, 2.5, -100.9, 0]
    >>> frame = h2o.H2OFrame(python_obj)
    >>> frame.asinh()
    """
    return self._unop("asinh")  # generic elementwise unary op
def atanh(self):
    """
    Create a new H2OFrame equal to elementwise inverse hyperbolic tangent of the current frame.

    :returns: New H2OFrame equal to elementwise inverse hyperbolic tangent of the current frame.

    :examples:

    >>> python_obj = [1, 2, 2.5, -100.9, 0]
    >>> frame = h2o.H2OFrame(python_obj)
    >>> frame.atanh()
    """
    return self._unop("atanh")  # generic elementwise unary op
def cospi(self):
    """
    Create a new H2OFrame equal to elementwise cosine of the current frame multiplied by Pi.

    :returns: New H2OFrame equal to elementwise cosine of the current frame multiplied by Pi.

    :examples:

    >>> python_obj = [1, 2, 2.5, -100.9, 0]
    >>> frame = h2o.H2OFrame(python_obj)
    >>> frame.cospi()
    """
    return self._unop("cospi")  # generic elementwise unary op
def sinpi(self):
    """
    Create a new H2OFrame equal to elementwise sine of the current frame multiplied by Pi.

    :returns: New H2OFrame equal to elementwise sine of the current frame multiplied by Pi.

    :examples:

    >>> python_obj = [1, 2, 2.5, -100.9, 0]
    >>> frame = h2o.H2OFrame(python_obj)
    >>> frame.sinpi()
    """
    return self._unop("sinpi")  # generic elementwise unary op
def tanpi(self):
    """
    Create a new H2OFrame equal to elementwise tangent of the current frame multiplied by Pi.

    :returns: New H2OFrame equal to elementwise tangent of the current frame multiplied by Pi.

    :examples:

    >>> python_obj = [1, 2, 2.5, -100.9, 0]
    >>> frame = h2o.H2OFrame(python_obj)
    >>> frame.tanpi()
    """
    return self._unop("tanpi")  # generic elementwise unary op
def abs(self):
    """
    Calculate the absolute value of the current frame.

    :returns: new H2OFrame equal to elementwise absolute value of the current frame.

    :examples:

    >>> python_obj = [1, 2, 2.5, -100.9, 0]
    >>> frame = h2o.H2OFrame(python_obj)
    >>> abs(frame)
    """
    return self._unop("abs")  # generic elementwise unary op
def sign(self):
    """
    Return new H2OFrame equal to signs of the values in the frame: -1, +1, or 0.

    :returns: New H2OFrame equal to signs of the values in the frame: -1, +1, or 0.

    :examples:

    >>> iris = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv")
    >>> iris.sign()
    """
    return self._unop("sign", rtype="int")  # result columns are typed int
def sqrt(self):
    """
    Create a new H2OFrame equal to elementwise square root of the current frame.

    :returns: New H2OFrame equal to elementwise square root of the current frame.

    :examples:

    >>> python_obj = [1, 2, 2.5, -100.9, 0]
    >>> frame = h2o.H2OFrame(python_obj)
    >>> frame.sqrt()
    """
    return self._unop("sqrt")  # generic elementwise unary op
def trunc(self):
    """
    Apply the numeric truncation function.

    ``trunc(x)`` is the integer obtained from ``x`` by dropping its decimal tail. This is equal to ``floor(x)``
    if ``x`` is positive, and ``ceil(x)`` if ``x`` is negative. Truncation is also called "rounding towards zero".

    :returns: new H2OFrame of truncated values of the original frame.

    :examples:

    >>> import math
    >>> import numpy as np
    >>> from random import randrange
    >>> row_num = randrange(1,10)
    >>> col_num = randrange(1,10)
    >>> length_out_r = math.ceil(0.78*row_num)
    >>> length_out_c = math.ceil(col_num*0.4)
    >>> python_lists = np.random.randint(-5,5, (row_num, col_num))
    >>> h2oframe = h2o.H2OFrame(python_obj=python_lists)
    >>> h2oframe.trunc()
    """
    return self._unop("trunc", rtype="int")  # result columns are typed int
def ceil(self):
    """
    Apply the ceiling function to the current frame.

    ``ceil(x)`` is the smallest integer greater or equal to ``x``.

    :returns: new H2OFrame of ceiling values of the original frame.

    :examples:

    >>> from random import randrange
    >>> import math
    >>> import numpy as np
    >>> row_num = randrange(1,10)
    >>> col_num = randrange(1,10)
    >>> length_out_r = math.ceil(0.78*row_num)
    >>> length_out_c = math.ceil(col_num*0.4)
    >>> python_lists = np.random.randint(-5,5, (row_num, col_num))
    >>> h2oframe = h2o.H2OFrame(python_obj=python_lists)
    >>> h2oframe.ceil()
    """
    return self._unop("ceiling", rtype="int")  # Rapids op name is "ceiling"; result columns are int
def floor(self):
    """
    Apply the floor function to the current frame. ``floor(x)`` is the largest integer smaller or equal to ``x``.

    :returns: new H2OFrame of floor values of the original frame.

    :examples:

    >>> python_obj = [1, 2, 2.5, -100.9, 0]
    >>> frame = h2o.H2OFrame(python_obj)
    >>> frame
    >>> frame.floor()
    """
    return self._unop("floor", rtype="int")  # result columns are typed int
def log(self):
    """
    Create a new H2OFrame equal to elementwise natural logarithm of the current frame.

    :returns: New H2OFrame equal to elementwise natural logarithm of the current frame.

    :examples:

    >>> python_obj = [1, 2, 2.5, -100.9, 0]
    >>> frame = h2o.H2OFrame(python_obj)
    >>> frame.log()
    """
    return self._unop("log")  # generic elementwise unary op
def log10(self):
    """
    Create new H2OFrame equal to elementwise decimal logarithm of the current frame.

    :returns: New H2OFrame equal to elementwise decimal logarithm of the current frame.

    :examples:

    >>> python_obj = [1, 2, 2.5, -100.9, 0]
    >>> frame = h2o.H2OFrame(python_obj)
    >>> frame.log10()
    """
    return self._unop("log10")  # generic elementwise unary op
def log1p(self):
    """
    Create a new H2OFrame equal to elementwise ``ln(1 + x)`` for each ``x`` in the current frame.

    :returns: New H2OFrame equal to elementwise ``ln(1 + x)`` for each ``x`` in the current frame.

    :examples:

    >>> python_obj = [1, 2, 2.5, -100.9, 0]
    >>> frame = h2o.H2OFrame(python_obj)
    >>> frame.log1p()
    """
    return self._unop("log1p")  # generic elementwise unary op
def log2(self):
    """
    Create a new H2OFrame equal to elementwise binary logarithm of the current frame.

    :returns: New H2OFrame equal to elementwise binary logarithm of the current frame.

    :examples:

    >>> python_obj = [1, 2, 2.5, -100.9, 0]
    >>> frame = h2o.H2OFrame(python_obj)
    >>> frame.log2()
    """
    return self._unop("log2")  # generic elementwise unary op
def exp(self):
    """
    Create a new H2OFrame equal to elementwise exponent (i.e. ``e^x``) of the current frame.

    :returns: New H2OFrame equal to elementwise exponent (i.e. ``e^x``) of the current frame.

    :examples:

    >>> python_obj = [1, 2, 2.5, -100.9, 0]
    >>> frame = h2o.H2OFrame(python_obj)
    >>> frame.exp()
    """
    return self._unop("exp")  # generic elementwise unary op
def expm1(self):
    """
    Create a new H2OFrame equal to elementwise exponent minus 1 (i.e. ``e^x - 1``) of the current frame.

    :returns: New H2OFrame equal to elementwise exponent minus 1 (i.e. ``e^x - 1``) of the current frame.

    :examples:

    >>> python_obj = [1, 2, 2.5, -100.9, 0]
    >>> frame = h2o.H2OFrame(python_obj)
    >>> frame.expm1()
    """
    return self._unop("expm1")  # generic elementwise unary op
def gamma(self):
    """
    Create a new H2OFrame equal to elementwise gamma function of the current frame.

    :returns: new H2OFrame equal to elementwise gamma function of the current frame.

    :examples:

    >>> python_obj = [1, 2, 2.5, -100.9, 0]
    >>> frame = h2o.H2OFrame(python_obj)
    >>> frame.gamma()
    """
    return self._unop("gamma")  # generic elementwise unary op
def lgamma(self):
    """
    Create a new H2OFrame equal to elementwise logarithm of the gamma function of the current frame.

    :returns: New H2OFrame equal to elementwise logarithm of the gamma function of the current frame.

    :examples:

    >>> python_obj = [1, 2, 2.5, -100.9, 0]
    >>> frame = h2o.H2OFrame(python_obj)
    >>> frame.lgamma()
    """
    return self._unop("lgamma")  # generic elementwise unary op
def digamma(self):
    """
    Create a new H2OFrame equal to elementwise digamma function of the current frame.

    :returns: New H2OFrame equal to elementwise digamma function of the current frame.

    :examples:

    >>> python_obj = [1, 2, 2.5, -100.9, 0]
    >>> frame = h2o.H2OFrame(python_obj)
    >>> frame.digamma()
    """
    return self._unop("digamma")  # generic elementwise unary op
def trigamma(self):
    """
    Create a new H2OFrame equal to the elementwise trigamma function of the current frame.

    :returns: new H2OFrame equal to elementwise trigamma function of the current frame.

    :examples:

    >>> python_obj = [1, 2, 2.5, -100.9, 0]
    >>> frame = h2o.H2OFrame(python_obj)
    >>> frame.trigamma()
    """
    return self._unop("trigamma")  # generic elementwise unary op
@staticmethod
def moment(year=None, month=None, day=None, hour=None, minute=None, second=None, msec=None, date=None, time=None):
    """
    Create a time column from individual components.

    Each parameter should be either an integer, or a single-column H2OFrame
    containing the corresponding time parts for each row.

    The "date" part of the timestamp can be specified using either the tuple ``(year, month, day)``, or an
    explicit ``date`` parameter. The "time" part of the timestamp is optional, but can be specified either via
    the ``time`` parameter, or via the ``(hour, minute, second, msec)`` tuple.

    :param year: the year part of the constructed date
    :param month: the month part of the constructed date
    :param day: the day-of-the-month part of the constructed date
    :param hour: the hours part of the constructed date
    :param minute: the minutes part of the constructed date
    :param second: the seconds part of the constructed date
    :param msec: the milliseconds part of the constructed date
    :param date date: construct the timestamp from the Python's native ``datetime.date`` (or ``datetime.datetime``)
        object. If the object passed is of type ``date``, then you can specify the time part using either the
        ``time`` argument, or ``hour`` ... ``msec`` arguments (but not both). If the object passed is of type
        ``datetime``, then no other arguments can be provided.
    :param time time: construct the timestamp from this Python's native ``datetime.time`` object. This argument
        cannot be used alone, it should be supplemented with either ``date`` argument, or ``year`` ... ``day``
        tuple.

    :returns: H2OFrame with one column containing the date constructed from the provided arguments.
    :raises H2OValueError: on conflicting or incomplete combinations of arguments, or on
        frame arguments that are not single numeric columns of a common length.

    :examples:

    >>> df = h2o.create_frame(rows=10,
    ...                       cols=3,
    ...                       factors=10,
    ...                       categorical_fraction=1.0/3,
    ...                       time_fraction=1.0/3,
    ...                       real_fraction=1.0/3,
    ...                       real_range=100,
    ...                       missing_fraction=0.0,
    ...                       seed=123)
    >>> df["C1"].moment(year=df["C1"].year(),
    ...                 month=df["C1"].month(),
    ...                 day=df["C1"].day(),
    ...                 hour=df["C1"].hour(),
    ...                 minute=df["C1"].minute(),
    ...                 second=df["C1"].second())
    """
    assert_is_type(date, None, datetime.date, numpy_datetime, pandas_timestamp)
    assert_is_type(time, None, datetime.time)
    assert_is_type(year, None, int, H2OFrame)
    assert_is_type(month, None, int, H2OFrame)
    assert_is_type(day, None, int, H2OFrame)
    assert_is_type(hour, None, int, H2OFrame)
    assert_is_type(minute, None, int, H2OFrame)
    assert_is_type(second, None, int, H2OFrame)
    assert_is_type(msec, None, int, H2OFrame)
    # Expand a `time` object into the individual hour/minute/second/msec parts.
    if time is not None:
        if hour is not None or minute is not None or second is not None or msec is not None:
            raise H2OValueError("Arguments hour, minute, second, msec cannot be used together with `time`.")
        hour = time.hour
        minute = time.minute
        second = time.second
        msec = time.microsecond // 1000
    # Expand a `date` (or datetime / numpy / pandas timestamp) into its parts.
    if date is not None:
        # Normalize pandas / numpy timestamps down to a plain datetime first.
        if is_type(date, pandas_timestamp):
            date = date.to_pydatetime()
        if is_type(date, numpy_datetime):
            date = date.astype("M8[ms]").astype("O")
        if year is not None or month is not None or day is not None:
            raise H2OValueError("Arguments year, month and day cannot be used together with `date`.")
        year = date.year
        month = date.month
        day = date.day
        if isinstance(date, datetime.datetime):
            if time is not None:
                raise H2OValueError("Argument `time` cannot be used together with `date` of datetime type.")
            if hour is not None or minute is not None or second is not None or msec is not None:
                raise H2OValueError("Arguments hour, minute, second, msec cannot be used together with `date` "
                                    "of datetime type.")
            hour = date.hour
            minute = date.minute
            second = date.second
            msec = date.microsecond // 1000
    if year is None or month is None or day is None:
        raise H2OValueError("Either arguments (`year`, `month` and `day`) or the `date` are required.")
    # Missing time parts default to zero.
    if hour is None: hour = 0
    if minute is None: minute = 0
    if second is None: second = 0
    if msec is None: msec = 0
    # Validate any H2OFrame arguments: single numeric column, all of a common nrows.
    local_vars = locals()
    res_nrows = None
    for n in ["year", "month", "day", "hour", "minute", "second", "msec"]:
        x = local_vars[n]
        if isinstance(x, H2OFrame):
            if x.ncols != 1:
                raise H2OValueError("Argument `%s` is a frame with more than 1 column" % n)
            if x.type(0) not in {"int", "real"}:
                raise H2OValueError("Column `%s` is not numeric (type = %s)" % (n, x.type(0)))
            if res_nrows is None:
                res_nrows = x.nrows
            if x.nrows == 0 or x.nrows != res_nrows:
                raise H2OValueError("Incompatible column `%s` having %d rows" % (n, x.nrows))
    # All-scalar input produces a single-row result.
    if res_nrows is None:
        res_nrows = 1
    res = H2OFrame._expr(ExprNode("moment", year, month, day, hour, minute, second, msec))
    # Pre-populate the cache: one "time"-typed column named "name", res_nrows rows.
    res._ex._cache._names = ["name"]
    res._ex._cache._types = {"name": "time"}
    res._ex._cache._nrows = res_nrows
    res._ex._cache._ncols = 1
    return res
def unique(self, include_nas=False):
    """
    Extract the unique values in the column.

    :param include_nas: If set to true, NAs are included. False (turned off) by default.
    :returns: H2OFrame of just the unique values in the column.

    :examples:

    >>> import numpy as np
    >>> python_lists = np.random.randint(-5,5, (100,1))
    >>> h2oframe = h2o.H2OFrame(python_obj=python_lists)
    >>> h2oframe.unique()
    """
    return H2OFrame._expr(expr=ExprNode("unique", self, include_nas))
def levels(self):
    """
    Get the factor levels.

    :returns: A list of lists, one list per column, of levels.

    :examples:

    >>> import numpy as np
    >>> from random import randrange
    >>> python_lists = np.random.randint(-2,2, (10000,2))
    >>> h2oframe = h2o.H2OFrame(python_obj=python_lists,
    ...                         column_types=['enum', 'enum'])
    >>> h2oframe.levels()
    """
    # The "levels" op returns a data frame of levels per column (as rows).
    lol = H2OFrame._expr(expr=ExprNode("levels", self)).as_data_frame(False)
    lol.pop(0)  # Remove column headers
    lol = list(zip(*lol))  # transpose: one tuple of levels per column
    # Columns with fewer levels are padded with '' — strip those out.
    return [[ll for ll in l if ll != ''] for l in lol]
def nlevels(self):
    """
    Get the number of factor levels for each categorical column.

    :returns: A list of the number of levels per column; note the quirk that ``0``
        (an int, not a list) is returned when there are no levels at all.

    :examples:

    >>> python_lists = np.random.randint(-2,2, (10000,2))
    >>> h2oframe = h2o.H2OFrame(python_obj=python_lists,
    ...                         column_types=['enum', 'enum'])
    >>> h2oframe.nlevels()
    """
    levels = self.levels()
    return [len(l) for l in levels] if levels else 0
def set_level(self, level):
    """
    A method to set all column values to one of the levels.

    :param str level: The level at which the column will be set (a string)
    :returns: H2OFrame with entries set to the desired level.

    :examples:

    >>> import numpy as np
    >>> import random
    >>> python_lists = np.random.randint(-5,5, (10000, 2))
    >>> h2oframe = h2o.H2OFrame(python_obj=python_lists)
    >>> newFrame = h2oframe.asfactor()
    >>> allLevels = newFrame.levels()
    >>> lastLevel = allLevels[0][len(allLevels[0])-1]
    >>> newFrame[0] = newFrame[0].set_level(level=lastLevel)
    >>> firstLevel = allLevels[1][0]
    >>> newFrame[1] = newFrame[1].set_level(level=firstLevel)
    >>> newFrame
    """
    # Shape and names are unchanged, so the existing cache can be reused.
    return H2OFrame._expr(expr=ExprNode("setLevel", self, level), cache=self._ex._cache)
def set_levels(self, levels):
"""
Replace the levels of a categorical column.
New levels must be aligned with the old domain. This call has copy-on-write semantics.
:param List[str] levels: A list of strings specifying the new levels. The number of new
levels must match the number of old levels.
:returns: A single-column H2OFrame with the desired levels.
:examples:
>>> import numpy as np
>>> import random
>>> python_lists = np.random.randint(-5,5, (10000, 2))
>>> h2oframe = h2o.H2OFrame(python_obj=python_lists)
>>> newFrame = h2oframe.asfactor()
>>> allLevels = newFrame.levels()
>>> newLevel0 = random.sample(allLevels[0], len(allLevels[0]))
>>> newLevel1 = random.sample(allLevels[1], len(allLevels[1]))
>>> newFrame[0] = newFrame[0].set_levels(levels=newLevel0)
>>> newFrame[1] = newFrame[1].set_levels(levels=newLevel1)
>>> newFrame
"""
assert_is_type(levels, [str])
return H2OFrame._expr(expr=ExprNode("setDomain", self, False, levels), cache=self._ex._cache)
def rename(self, columns=None):
"""
Change names of columns in the frame.
Dict key is an index or name of the column whose name is to be set.
Dict value is the new name of the column.
:param columns: dict-like transformations to apply to the column names
:returns: Renamed columns
:examples:
>>> iris = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris.csv")
>>> iris
>>> name = iris.rename(columns={'C2':'C1',
... 'C1':'C2',
... 'C3':'X3',
... 'F0':'X0',
... 'C3':'Y3'})
>>> name
"""
assert_is_type(columns, None, dict)
new_names = self.names
ncols = self.ncols
for col, name in columns.items():
col_index = None
if is_type(col, int) and (-ncols <= col < ncols):
col_index = (col + ncols) % ncols # handle negative indices
elif is_type(col, str) and col in self.names:
col_index = self.names.index(col) # lookup the name
if col_index is not None:
new_names[col_index] = name
return self.set_names(new_names)
def set_names(self, names):
"""
Change names of all columns in the frame.
:param List[str] names: The list of new names for every column in the frame.
:returns: Frame with all new column names.
:examples:
>>> import numpy as np
>>> import random
>>> row_num = random.randrange(1,10)
>>> col_num = random.randrange(1,10)
>>> python_lists = np.random.randint(-5,5, (row_num, col_num))
>>> h2oframe = h2o.H2OFrame(python_obj=python_lists)
>>> newNames = random.sample(h2oframe.names, col_num)
>>> h2oframe.set_names(names=newNames)
"""
assert_is_type(names, [str])
assert_satisfies(names, len(names) == self.ncol)
self._ex = ExprNode("colnames=", self, range(self.ncol), names) # Update-in-place, but still lazy
return self
    def set_name(self, col=None, name=None):
        """
        Set a new name for a single column, in place.

        Note: despite operating on a column, this method renames the column within this
        frame and returns ``None`` (it does not return the renamed column).

        :param col: index (int, may be negative) or current name (str) of the column to
            rename; may be omitted for single-column frames.
        :param name: the new name for the column.
        :raises H2OValueError: if ``col`` is out of bounds / unknown, if ``col`` is omitted
            on a multi-column frame, or if ``name`` already names a different column.

        :examples:

        >>> frame = h2o.H2OFrame(python_obj=[[1, 2], [3, 4]])
        >>> frame.set_name(col=0, name="Dolphine")
        >>> frame
        """
        assert_is_type(col, None, int, str)
        assert_is_type(name, str)
        ncols = self.ncols
        # Resolve `col` into a 0-based column index.
        col_index = None
        if is_type(col, int):
            if not(-ncols <= col < ncols):
                raise H2OValueError("Index %d is out of bounds for a frame with %d columns" % (col, ncols))
            col_index = (col + ncols) % ncols  # handle negative indices
        elif is_type(col, str):
            if col not in self.names:
                raise H2OValueError("Column %s doesn't exist in the frame." % col)
            col_index = self.names.index(col)  # lookup the name
        else:
            assert col is None
            if ncols != 1:
                raise H2OValueError("The frame has %d columns; please specify which one to rename" % ncols)
            col_index = 0
        # Renaming a column onto itself is allowed; renaming onto another existing column is not.
        if name != self.names[col_index] and name in self.types:
            raise H2OValueError("Column '%s' already exists in the frame" % name)
        oldname = self.names[col_index]
        old_cache = self._ex._cache
        self._ex = ExprNode("colnames=", self, col_index, name)  # Update-in-place, but still lazy
        self._ex._cache.fill_from(old_cache)
        if self.names is None:
            # Cache had no names: force evaluation so the server fills them in.
            self._frame()._ex._cache.fill()
        else:
            # Patch the cached names/types locally so no round-trip is needed.
            self._ex._cache._names = self.names[:col_index] + [name] + self.names[col_index + 1:]
            self._ex._cache._types[name] = self._ex._cache._types.pop(oldname)
        return
def as_date(self, format):
"""
Convert the frame (containing strings / categoricals) into the ``date`` format.
:param str format: the format string (e.g. "%Y-%m-%d")
:returns: new H2OFrame with "int" column types
:examples:
>>> hdf = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/jira/v-11-eurodate.csv")
>>> hdf["ds5"].as_date("%d.%m.%y %H:%M")
"""
fr = H2OFrame._expr(expr=ExprNode("as.Date", self, format), cache=self._ex._cache)
if fr._ex._cache.types_valid():
fr._ex._cache.types = {k: "int" for k in self._ex._cache.types.keys()}
return fr
def cumsum(self, axis=0):
"""
Compute cumulative sum over rows / columns of the frame.
:param int axis: 0 for column-wise, 1 for row-wise
:returns: new H2OFrame with cumulative sums of the original frame.
:examples:
>>> foo = h2o.H2OFrame([[x,y] for x,
... y in zip(list(range(10)),
... list(range(9,-1,-1)))])
>>> cumsum1 = foo[0].cumsum()
>>> cumsum1
>>> cumsum2 = foo[1].cumsum()
"""
return H2OFrame._expr(expr=ExprNode("cumsum", self, axis), cache=self._ex._cache)
def cumprod(self, axis=0):
"""
Compute cumulative product over rows / columns of the frame.
:param int axis: 0 for column-wise, 1 for row-wise
:returns: new H2OFrame with cumulative products of the original frame.
:examples:
>>> foo = h2o.H2OFrame([[x,y] for x,
... y in zip(list(range(10)),
... list(range(9,-1,-1)))])
>>> cumprod1 = foo[1:10,0].cumprod()
>>> cumprod1
>>> cumprod2 = foo[0:9,1].cumprod()
>>> cumprod2
"""
return H2OFrame._expr(expr=ExprNode("cumprod", self, axis), cache=self._ex._cache)
def cummin(self, axis=0):
"""
Compute cumulative minimum over rows / columns of the frame.
:param int axis: 0 for column-wise, 1 for row-wise
:returns: new H2OFrame with running minimums of the original frame.
:examples:
>>> foo = h2o.H2OFrame([[x,y] for x,
... y in zip(list(range(10)),
... list(range(9,-1,-1)))])
>>> cummin1 = foo[0].cummin()
>>> cummin1
>>> cummin2 = foo[1].cummin()
>>> cummin2
"""
return H2OFrame._expr(expr=ExprNode("cummin", self, axis), cache=self._ex._cache)
def cummax(self, axis=0):
"""
Compute cumulative maximum over rows / columns of the frame.
:param int axis: 0 for column-wise, 1 for row-wise
:returns: new H2OFrame with running maximums of the original frame.
:examples:
>>> foo = h2o.H2OFrame([[x,y] for x,
... y in zip(list(range(10)),
... list(range(9,-1,-1)))])
>>> cummax1 = foo[0].cummax()
>>> cummax1
>>> cummax2 = foo[1].cummax()
>>> cummax2
"""
return H2OFrame._expr(expr=ExprNode("cummax", self, axis), cache=self._ex._cache)
def prod(self, na_rm=False):
"""
Compute the product of all values across all rows in a single column H2O frame. If you apply
this command on a multi-column H2O frame, the answer may not be correct.
:param bool na_rm: If True then NAs will be ignored during the computation.
:returns: product of all values in the frame (a float)
:examples:
>>> import random
>>> import numpy as np
>>> data = [[random.uniform(1,10)] for c in range(10)]
>>> h2o_data = h2o.H2OFrame(data)
>>> np_data = np.array(data)
>>> h2o_data.prod(na_rm=True)
>>> np.prod(np_data)
"""
return ExprNode("prod.na" if na_rm else "prod", self)._eager_scalar()
def any(self):
"""
Determine whether any element in the frame is either True, non-zero, or NA.
:returns: (bool) True if any element in the frame is either True, non-zero or NA.
:examples:
>>> python_obj = [1,2,2.5,-100.9,0]
>>> frame = h2o.H2OFrame(python_obj)
>>> frame.any()
"""
return bool(ExprNode("any", self)._eager_scalar())
def any_na_rm(self):
"""
Determine whether any value in the frame is non-zero.
:returns: (bool) True if any value in the frame is non-zero (disregarding all NAs).
:example:
>>> python_obj = [1,2,2.5,-100.9,0]
>>> frame = h2o.H2OFrame(python_obj)
>>> frame.any_na_rm()
"""
return bool(ExprNode("any.na", self)._eager_scalar())
def all(self):
"""
Determine whether every element in the frame is either True, non-zero, or NA.
:returns: (bool) True if every element in the frame is either True, non-zero or NA.
:examples:
>>> python_obj = [1,2,2.5,-100.9,0]
>>> frame = h2o.H2OFrame(python_obj)
>>> frame.all()
"""
return bool(ExprNode("all", self)._eager_scalar())
def isnumeric(self):
"""
Test which columns in the frame are numeric.
:returns: a list of True/False indicating for each column in the frame whether it is numeric.
:examples:
>>> iris = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv")
>>> iris.summary()
# Look at the column headers: [0:3] are numeric; [4] is not
>>> iris[0].isnumeric()
# Return as True
>>> iris[4].isnumeric()
# Return as False
"""
return [bool(o) for o in ExprNode("is.numeric", self)._eager_scalar()]
def isstring(self):
"""
Test which columns in the frame are string.
:returns: a list of True/False indicating for each column in the frame whether it is string.
:examples:
>>> import numpy as np
>>> from random import randrange
>>> row_num = randrange(1,10)
>>> col_num = randrange(1,10)
>>> python_lists = np.random.randint(-5,5, (row_num, col_num))
>>> h2oframe = h2o.H2OFrame(python_obj=python_lists)
>>> h2oframe.isstring()
>>> newFrame = h2oframe.asfactor().ascharacter()
>>> newFrame.isstring()
"""
return [bool(o) for o in ExprNode("is.character", self)._eager_scalar()]
    def isin(self, item):
        """
        Test whether elements of an H2OFrame are contained in ``item``.

        :param item: an item, or a list / tuple / set of items, to compare the H2OFrame
            against.
        :returns: an H2OFrame of 0s and 1s showing whether each element in the original
            H2OFrame is contained in ``item``.

        :examples:

        >>> fr = h2o.create_frame(rows=100, cols=1, categorical_fraction=1, factors=3)
        >>> f2 = ~fr["C1"].isin(["c0.l0", "c0.l2"])
        >>> f2
        """
        if is_type(item, list, tuple, set):
            # Single str/enum column: server-side `match` does the membership test in one pass.
            if self.ncols == 1 and (self.type(0) == 'str' or self.type(0) == 'enum'):
                return self.match(item)
            else:
                # General case: OR together one equality comparison per candidate value.
                return functools.reduce(H2OFrame.__or__, (self == i for i in item))
        else:
            return self == item
def kfold_column(self, n_folds=3, seed=-1):
"""
Build a fold assignments column for cross-validation.
This method will produce a column having the same data layout as the source frame.
:param int n_folds: An integer specifying the number of validation sets to split the training data into.
:param int seed: Seed for random numbers as fold IDs are randomly assigned.
:returns: A single column H2OFrame with the fold assignments.
:examples:
>>> from random import randrange
>>> import numpy as np
>>> python_lists = np.random.randint(-5,5, (1000, 2))
>>> k = randrange(2,10)
>>> h2oframe = h2o.H2OFrame(python_obj=python_lists)
>>> clist = h2oframe.kfold_column(n_folds=k, seed=12345)
>>> clist
"""
return H2OFrame._expr(expr=ExprNode("kfold_column", self, n_folds, seed))._frame() # want this to be eager!
    def modulo_kfold_column(self, n_folds=3):
        """
        Build a fold assignments column for cross-validation.

        Rows are assigned a fold deterministically, according to the current row number
        modulo ``n_folds`` (no randomness involved).

        :param int n_folds: An integer specifying the number of validation sets to split the training data into.
        :returns: A single-column H2OFrame with the fold assignments.

        :examples:

        >>> from random import randrange
        >>> import numpy as np
        >>> python_lists = np.random.randint(-5,5, (1000, 2))
        >>> k = randrange(2,10)
        >>> h2oframe = h2o.H2OFrame(python_obj=python_lists)
        >>> h2oframe.modulo_kfold_column(n_folds=k)
        """
        return H2OFrame._expr(expr=ExprNode("modulo_kfold_column", self, n_folds))._frame()  # want this to be eager!
def stratified_kfold_column(self, n_folds=3, seed=-1):
"""
Build a fold assignment column with the constraint that each fold has the same class
distribution as the fold column.
:param int n_folds: The number of folds to build.
:param int seed: A seed for the random number generator.
:returns: A single column H2OFrame with the fold assignments.
:examples:
>>> import numpy as np
>>> python_lists = np.random.randint(-3,3, (10000,2))
>>> h2oframe = h2o.H2OFrame(python_obj=python_lists).asfactor()
>>> h2oframe[1].stratified_kfold_column(n_folds=3, seed=-1)
"""
return H2OFrame._expr(
expr=ExprNode("stratified_kfold_column", self, n_folds, seed))._frame() # want this to be eager!
    def structure(self):
        """
        Compactly display the internal structure of this H2OFrame (similar to R's ``str()``).

        Prints the frame id and dimensions, then one line per column: factor columns show
        their level count and level names, numeric columns show the first few values.

        :returns: None; output is printed to stdout.

        :examples:

        >>> frame = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris.csv")
        >>> frame.structure()
        """
        df = self.as_data_frame(use_pandas=False)
        cn = df.pop(0)  # first row of the plain-list representation is the column names
        nr = self.nrow
        nc = self.ncol
        width = max([len(c) for c in cn])  # pad names so the ':' separators line up
        isfactor = self.isfactor()
        numlevels = self.nlevels()
        lvls = self.levels()
        print("H2OFrame: '{}' \nDimensions: {} obs. of {} variables".format(self.frame_id, nr, nc))
        for i in range(nc):
            print("$ {} {}: ".format(cn[i], ' ' * (width - max(0, len(cn[i])))), end=' ')
            if isfactor[i]:
                nl = numlevels[i]
                print("Factor w/ {} level(s) {} ".format(nl, '"' + '","'.join(lvls[i]) + '"'), end='\n')
            else:
                # Show up to the first 10 values of the column; empty cells print as "nan".
                print("num {}".format(" ".join(it[0] if it else "nan" for it in h2o.as_list(self[:10, i], False)[1:])))
def as_data_frame(self, use_pandas=True, header=True):
"""
Obtain the dataset as a python-local object.
:param bool use_pandas: If True (default) then return the H2OFrame as a pandas DataFrame (requires that the
``pandas`` library was installed). If False, then return the contents of the H2OFrame as plain nested
list, in a row-wise order.
:param bool header: If True (default), then column names will be appended as the first row in list
:returns: A python object (a list of lists of strings, each list is a row, if use_pandas=False, otherwise
a pandas DataFrame) containing this H2OFrame instance's data.
:examples:
>>> airlines= h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/allyears2k_headers.zip")
>>> airlines["Year"]= airlines["Year"].asfactor()
>>> airlines["Month"]= airlines["Month"].asfactor()
>>> airlines["DayOfWeek"] = airlines["DayOfWeek"].asfactor()
>>> airlines["Cancelled"] = airlines["Cancelled"].asfactor()
>>> airlines['FlightNum'] = airlines['FlightNum'].asfactor()
>>> df = airlines.as_data_frame()
>>> df
"""
if can_use_pandas() and use_pandas:
import pandas
return pandas.read_csv(StringIO(self.get_frame_data()), low_memory=False, skip_blank_lines=False)
from h2o.utils.csv.readers import reader
frame = [row for row in reader(StringIO(self.get_frame_data()))]
if not header:
frame.pop(0)
return frame
def save_to_hive(self, jdbc_url, table_name, format="csv", table_path=None, tmp_path=None):
"""
Save contents of this data frame into a Hive table.
:param jdbc_url: Hive JDBC connection URL.
:param table_name: Table name into which to store the data. The table must not exist as it will be created
to match the structure of the the frame. The user must be allowed to create tables.
:param format: Storage format of created Hive table, can be either ``csv`` (default) or ``parquet``.
:param table_path: If specified, the table will be created as an external table and this is where the data
will be stored.
:param tmp_path: Path where to store temporary data.
:examples:
>>> airlines= h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/allyears2k_headers.zip")
>>> airlines["Year"] = airlines["Year"].asfactor()
>>> airlines.save_to_hive("jdbc:hive2://hive-server:10000/default", "airlines")
"""
assert_is_type(jdbc_url, str)
assert_is_type(table_name, str)
assert_is_type(format, Enum("csv", "parquet"))
assert_is_type(table_path, str, None)
assert_is_type(tmp_path, str, None)
p = {
"frame_id": self.frame_id,
"jdbc_url": jdbc_url,
"table_name": table_name,
"format": format,
"table_path": table_path,
"tmp_path": tmp_path
}
h2o.api("POST /3/SaveToHiveTable", data=p)
def get_frame_data(self):
"""
Get frame data as a string in csv format.
This will create a multiline string, where each line will contain a separate row of frame's data, with
individual values separated by commas.
:returns: Frame data as a string in csv format.
:examples:
>>> iris = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv")
>>> iris.get_frame_data()
"""
return h2o.api("GET /3/DownloadDataset", data={"frame_id": self.frame_id, "hex_string": False, "escape_quotes" : True})
    def __getitem__(self, item):
        """
        Frame slicing, supports row and column slicing.

        :param item: selector of a subframe. This can be one of the following:

            - an int, indicating selection of a single column at the specified index (0-based)
            - a string, selecting a column with the given name
            - a list of ints or strings, selecting several columns with the given indices / names
            - a slice, selecting columns with the indices within this slice
            - a single-column boolean frame, selecting rows for which the selector is true
            - a 2-element tuple, where the first element is a row selector, and the second element is the
              column selector. Here the row selector may be one of: an int, a list of ints, a slice, or
              a boolean frame. The column selector is similarly one of: an int, a list of ints, a string,
              a list of strings, or a slice. It is also possible to use the empty slice (``:``) to select
              all elements within one of the dimensions.

        :returns: A new frame comprised of some rows / columns of the source frame; or a scalar
            when both selectors resolve to a single cell.

        :examples:

        >>> fr[2]              # All rows, 3rd column
        >>> fr[-2]             # All rows, 2nd column from end
        >>> fr[:, -1]          # All rows, last column
        >>> fr[0:5, :]         # First 5 rows, all columns
        >>> fr[fr[0] > 1, :]   # Only rows where first cell is greater than 1, all columns
        >>> fr[[1, 5, 6]]      # Columns 2, 6, and 7
        >>> fr[0:50, [1,2,3]]  # First 50 rows, columns 2, 3, and 4
        """
        # Select columns based on a string, a list of strings, an int or a slice.
        # Note that the python column selector handles the case of negative
        # selections, or out-of-range selections - without having to compute
        # self._ncols in the front-end - which would force eager evaluation just to
        # range check in the front-end.
        new_ncols = -1
        new_nrows = -1
        new_names = None
        new_types = None
        fr = None
        flatten = False  # becomes True when the selection resolves to a single cell
        if isinstance(item, slice):
            item = normalize_slice(item, self.ncols)
        if is_type(item, str, int, list, slice):
            # Column-only selection: all rows are kept.
            new_ncols, new_names, new_types, item = self._compute_ncol_update(item)
            new_nrows = self.nrow
            fr = H2OFrame._expr(expr=ExprNode("cols_py", self, item))
        elif isinstance(item, (ExprNode, H2OFrame)):
            # Boolean-frame row selection: all columns are kept.
            new_ncols = self.ncol
            new_names = self.names
            new_types = self.types
            new_nrows = -1  # have a "big" predicate column -- update cache later on...
            fr = H2OFrame._expr(expr=ExprNode("rows", self, item))
        elif isinstance(item, tuple):
            # Combined (rows, cols) selection.
            rows, cols = item
            allrows = allcols = False
            if isinstance(cols, slice):
                cols = normalize_slice(cols, self.ncols)
                allcols = cols == slice(0, self.ncols, 1)
            if isinstance(rows, slice):
                rows = normalize_slice(rows, self.nrows)
                allrows = rows == slice(0, self.nrows, 1)
            if allrows and allcols: return self  # fr[:,:] -> all rows and columns.. return self
            if allrows:
                new_ncols, new_names, new_types, cols = self._compute_ncol_update(cols)
                new_nrows = self.nrow
                fr = H2OFrame._expr(expr=ExprNode("cols_py", self, cols))  # fr[:,cols] -> really just a column slice
            if allcols:
                new_ncols = self.ncols
                new_names = self.names
                new_types = self.types
                new_nrows, rows = self._compute_nrow_update(rows)
                fr = H2OFrame._expr(expr=ExprNode("rows", self, rows))  # fr[rows,:] -> really just a row slices
            if not allrows and not allcols:
                # Compose: select columns first, then filter rows on the result.
                new_ncols, new_names, new_types, cols = self._compute_ncol_update(cols)
                new_nrows, rows = self._compute_nrow_update(rows)
                fr = H2OFrame._expr(expr=ExprNode("rows", ExprNode("cols_py", self, cols), rows))
            flatten = is_type(rows, int) and is_type(cols, str, int)
        else:
            raise ValueError("Unexpected __getitem__ selector: " + str(type(item)) + " " + str(item.__class__))
        assert fr is not None
        # Pythonic: if the row & col selector turn into ints (or a single col
        # name), then extract the single element out of the Frame. Otherwise
        # return a Frame, EVEN IF the selectors are e.g. slices-of-1-value.
        if flatten:
            return fr.flatten()
        fr._ex._cache.ncols = new_ncols
        fr._ex._cache.nrows = new_nrows
        fr._ex._cache.names = new_names
        fr._ex._cache.types = new_types
        fr._is_frame = self._is_frame
        return fr
    def _compute_ncol_update(self, item):  # computes new ncol, names, and types
        """
        Compute the cache metadata resulting from a column selection, without evaluation.

        :param item: column selector: an int, a str, a list of ints or strs, or a
            normalized slice.
        :returns: tuple ``(new_ncols, new_names, new_types, item)``; ``new_ncols`` may be
            -1 when the count was not computed (see note on the slice branch below).
        :raises ValueError: if ``item`` is of an unsupported type.
        """
        new_ncols = -1
        if isinstance(item, list):
            new_ncols = len(item)
            if _is_str_list(item):
                # Selection by column names.
                new_types = {k: self.types[k] for k in item}
                new_names = item
            else:
                # Selection by column indices.
                new_names = [self.names[i] for i in item]
                new_types = {name: self.types[name] for name in new_names}
        elif isinstance(item, slice):
            assert slice_is_normalized(item)
            new_names = self.names[item]
            new_types = {name: self.types[name] for name in new_names}
            # NOTE(review): new_ncols is left at -1 in this branch — presumably callers
            # recover the count from new_names, but confirm before relying on it.
        elif is_type(item, str, int):
            new_ncols = 1
            if is_type(item, str):
                # Single column by name; the name may not be in the (possibly stale) types cache.
                new_names = [item]
                new_types = None if item not in self.types else {item: self.types[item]}
            else:
                # Single column by index.
                new_names = [self.names[item]]
                new_types = {new_names[0]: self.types[new_names[0]]}
        else:
            raise ValueError("Unexpected type: " + str(type(item)))
        return (new_ncols, new_names, new_types, item)
def _compute_nrow_update(self, item):
if isinstance(item, list):
new_nrows = len(item)
elif isinstance(item, slice):
assert slice_is_normalized(item)
new_nrows = (item.stop - item.start + item.step - 1) // item.step
elif isinstance(item, H2OFrame):
new_nrows = -1
else:
new_nrows = 1
return [new_nrows, item]
    def __setitem__(self, item, value):
        """
        Replace, update or add column(s) in an H2OFrame.

        :param item: A 0-based index of a column, or a column name, or a list of column names, or a slice.
            Alternatively, this may also be a two-element tuple where the first element in the tuple is a row selector,
            and the second element is a column selector. Finally, this can also be a boolean frame indicating which
            rows/columns to modify. If ``item`` is a column name that does not exist in the frame, then a new column
            will be appended to the current frame.
        :param value: The value replacing elements at positions given by ``item``. This can be either a constant, or
            another frame.
        """
        # TODO: add far stronger type checks, so that we never run in a situation where the server has to
        #       tell us that we requested an illegal operation.
        assert_is_type(item, str, int, tuple, list, H2OFrame)
        assert_is_type(value, None, numeric, str, H2OFrame)
        col_expr = None
        row_expr = None
        colname = None  # When set, we are doing an append
        if is_type(item, str):  # String column name, could be new or old
            if item in self.names:
                col_expr = self.names.index(item)  # Update an existing column
            else:
                col_expr = self.ncols
                colname = item  # New, append
        elif is_type(item, int):
            if not(-self.ncols <= item < self.ncols):
                raise H2OValueError("Incorrect column index: %d" % item)
            col_expr = item  # Column by number
            if col_expr < 0:
                col_expr += self.ncols  # normalize negative index
        elif isinstance(item, tuple):  # Both row and col specifiers
            # Need more type checks
            row_expr = item[0]
            col_expr = item[1]
            if is_type(col_expr, str):  # Col by name
                if col_expr not in self.names:  # Append
                    colname = col_expr
                    col_expr = self.ncol
            elif is_type(col_expr, int):
                if not(-self.ncols <= col_expr < self.ncols):
                    raise H2OValueError("Incorrect column index: %d" % item)
                if col_expr < 0:
                    col_expr += self.ncols  # normalize negative index
            elif isinstance(col_expr, slice):  # Col by slice
                if col_expr.start is None and col_expr.stop is None:
                    col_expr = slice(0, self.ncol)  # Slice of all
            if isinstance(row_expr, slice):
                # Fill in open-ended row-slice bounds so the server gets explicit limits.
                start = row_expr.start
                step = row_expr.step
                stop = row_expr.stop
                if start is None: start = 0
                if stop is None: stop = self.nrows
                row_expr = slice(start, stop, step)
        elif isinstance(item, H2OFrame):
            row_expr = item  # Row slicing
        elif isinstance(item, list):
            col_expr = item
        if value is None: value = float("nan")
        # If `value` references this frame, its expression must be wiped afterwards
        # to keep reference counts correct.
        value_is_own_subframe = isinstance(value, H2OFrame) and self._is_frame_in_self(value)
        old_cache = self._ex._cache
        if colname is None:
            # In-place update of existing rows/columns (":=" op).
            self._ex = ExprNode(":=", self, value, col_expr, row_expr)
            self._ex._cache.fill_from(old_cache)
            if isinstance(value, H2OFrame) and \
                    value._ex._cache.types_valid() and \
                    self._ex._cache.types_valid():
                self._ex._cache._types.update(value._ex._cache.types)
            else:
                # Types can no longer be trusted; drop the cache and let the server refill it.
                self._ex._cache.types = None
        else:
            # Appending a brand new column.
            self._ex = ExprNode("append", self, value, colname)
            self._ex._cache.fill_from(old_cache)
            self._ex._cache.names = self.names + [colname]
            self._ex._cache._ncols += 1
            if self._ex._cache.types_valid() and isinstance(value, H2OFrame) and value._ex._cache.types_valid():
                self._ex._cache._types[colname] = list(viewvalues(value._ex._cache.types))[0]
            else:
                self._ex._cache.types = None
        if value_is_own_subframe:
            value._ex = None  # wipe out to keep ref counts correct
def _is_frame_in_self(self, frame):
if self._ex is frame._ex: return True
if frame._ex._children is None: return False
return any(self._is_expr_in_self(ch) for ch in frame._ex._children)
def _is_expr_in_self(self, expr):
if not isinstance(expr, ExprNode): return False
if self._ex is expr: return True
if expr._children is None: return False
return any(self._is_expr_in_self(ch) for ch in expr._children)
def drop(self, index, axis=1):
"""
Drop a single column or row or a set of columns or rows from a H2OFrame.
Dropping a column or row is not in-place.
Indices of rows and columns are zero-based.
:param index: A list of column indices, column names, or row indices to drop; or
a string to drop a single column by name; or an int to drop a single column by index.
:param int axis: If 1 (default), then drop columns; if 0 then drop rows.
:returns: a new H2OFrame with the respective dropped columns or rows. The original H2OFrame remains
unchanged.
:examples:
>>> pros = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/prostate/prostate.csv.zip")
>>> nc = pros.ncol
>>> nr = pros.nrow
>>> dropped_col_int = pros.drop(0)
>>> dropped_col_int
"""
if axis == 1:
if not isinstance(index, list):
#If input is a string, i.e., "C1":
if is_type(index, str):
#Check if index is an actual column(s) in the frame
if index not in self.names:
raise H2OValueError("Column(s) selected to drop are not in original frame: %r" % index)
index = self.names.index(index)
#If input is an int indicating a column index, i.e., 3:
elif is_type(index, int):
#Check if index is an actual column index in the frame
if index > self.ncol:
raise H2OValueError("Column index selected to drop is not part of the frame: %r" % index)
if index < 0:
raise H2OValueError("Column index selected to drop is not positive: %r" % index)
fr = H2OFrame._expr(expr=ExprNode("cols", self, -(index + 1)), cache=self._ex._cache)
fr._ex._cache.ncols -= 1
fr._ex._cache.names = self.names[:index] + self.names[index + 1:]
fr._ex._cache.types = {name: self.types[name] for name in fr._ex._cache.names}
return fr
elif isinstance(index, list):
#If input is an int array indicating a column index, i.e., [3] or [1,2,3]:
if is_type(index, [int]):
if max(index) > self.ncol:
raise H2OValueError("Column index selected to drop is not part of the frame: %r" % index)
if min(index) < 0:
raise H2OValueError("Column index selected to drop is not positive: %r" % index)
index = [-(i + 1) for i in index]
#If index is a string array, i.e., ["C1", "C2"]
elif is_type(index, [str]):
#Check if index is an actual column(s) in the frame
if not set(index).issubset(self.names):
raise H2OValueError("Column(s) selected to drop are not in original frame: %r" % index)
index = [-(self.names.index(i) + 1) for i in index]
fr = H2OFrame._expr(expr=ExprNode("cols", self, index), cache=self._ex._cache)
fr._ex._cache.ncols -= len(index)
fr._ex._cache.names = [i for i in self.names
if self.names.index(i) not in list(map(lambda x: abs(x) - 1, index))]
fr._ex._cache.types = {name: fr.types[name] for name in fr._ex._cache.names}
else:
raise ValueError("Invalid column index types. Must either be a list of all int indexes, "
"a string list of all column names, a single int index, or"
"a single string for dropping columns.")
return fr
elif axis == 0:
if is_type(index, [int]):
#Check if index is an actual column index in the frame
if max(index) > self.nrow:
raise H2OValueError("Row index selected to drop is not part of the frame: %r" % index)
if min(index) < 0:
raise H2OValueError("Row index selected to drop is not positive: %r" % index)
index = [-(x + 1) for x in index]
fr = H2OFrame._expr(expr=ExprNode("rows", self, index), cache=self._ex._cache)
fr._ex._cache.nrows -= len(index)
else:
raise ValueError("Invalid row indexes. Must be a list of int row indexes to drop from the H2OFrame.")
return fr
    def pop(self, i):
        """
        Pop a column from the H2OFrame at index i.

        :param i: The index (int) or name (str) of the column to pop.
        :returns: an H2OFrame containing the column dropped from the current frame; the current frame is modified
            in-place and loses the column.

        :examples:

        >>> prostate = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/prostate/prostate.csv.zip")
        >>> popped_col = prostate.pop(prostate.names[0])
        >>> prostate
        >>> popped_col
        """
        if is_type(i, str): i = self.names.index(i)
        # The popped column: a lazy single-column selection of the original frame.
        col = H2OFrame._expr(expr=ExprNode("cols", self, i))
        old_cache = self._ex._cache
        # Rebind self to "all columns except i" (negative selector means exclusion).
        self._ex = ExprNode("cols", self, -(i + 1))
        self._ex._cache.ncols -= 1
        self._ex._cache.names = old_cache.names[:i] + old_cache.names[i + 1:]
        self._ex._cache.types = {name: old_cache.types[name] for name in self._ex._cache.names}
        self._ex._cache._data = None  # cached data preview is now stale
        col._ex._cache.ncols = 1
        col._ex._cache.names = [old_cache.names[i]]
        return col
def quantile(self, prob=None, combine_method="interpolate", weights_column=None):
"""
Compute quantiles.
:param List[float] prob: list of probabilities for which quantiles should be computed.
:param str combine_method: for even samples this setting determines how to combine quantiles. This can be
one of ``"interpolate"``, ``"average"``, ``"low"``, ``"high"``.
:param weights_column: optional weights for each row. If not given, all rows are assumed to have equal
importance. This parameter can be either the name of column containing the observation weights in
this frame, or a single-column separate H2OFrame of observation weights.
:returns: a new H2OFrame containing the quantiles and probabilities.
:examples:
>>> data = [[random.uniform(-10000,10000)] for c in range(1000)]
>>> h2o_data = h2o.H2OFrame(data)
>>> np_data = np.array(data)
>>> h2o_data.quantile(prob=None,
... combine_method='interpolate',
... weights_column=None)
"""
if len(self) == 0: return self
if prob is None: prob = [0.01, 0.1, 0.25, 0.333, 0.5, 0.667, 0.75, 0.9, 0.99]
if weights_column is None:
weights_column = "_"
else:
assert_is_type(weights_column, str, I(H2OFrame, lambda wc: wc.ncol == 1 and wc.nrow == self.nrow))
if isinstance(weights_column, H2OFrame):
merged = self.cbind(weights_column)
weights_column = merged.names[-1]
return H2OFrame._expr(expr=ExprNode("quantile", merged, prob, combine_method, weights_column))
return H2OFrame._expr(expr=ExprNode("quantile", self, prob, combine_method, weights_column))
def concat(self, frames, axis=1):
"""
Append multiple H2OFrames to this frame, column-wise or row-wise.
:param List[H2OFrame] frames: list of frames that should be appended to the current frame.
:param int axis: if 1 then append column-wise (default), if 0 then append row-wise.
:returns: an H2OFrame of the combined datasets.
:examples:
>>> df1 = h2o.create_frame(integer_fraction=1,binary_fraction=0,
... categorical_fraction=0,seed=1)
>>> df2 = h2o.create_frame(integer_fraction=1,binary_fraction=0,
... categorical_fraction=0,seed=2)
>>> df3 = h2o.create_frame(integer_fraction=1,binary_fraction=0,
... categorical_fraction=0,seed=3)
>>> df123 = df1.concat([df2,df3])
"""
if len(frames) == 0:
raise ValueError("Input list of frames is empty! Nothing to concat.")
if axis == 1:
df = self.cbind(frames)
else:
df = self.rbind(frames)
return df
def cbind(self, data):
    """
    Append data to this frame column-wise.

    :param H2OFrame data: append columns of frame ``data`` to the current frame. You can also cbind a number,
        in which case it will get converted into a constant column.

    :returns: new H2OFrame with all frames in ``data`` appended column-wise.
    """
    assert_is_type(data, H2OFrame, numeric, [H2OFrame, numeric])
    others = data if isinstance(data, list) else [data]
    # Predict the resulting column names/types locally so the cache can be filled eagerly.
    combined_names = list(self.columns)
    combined_types = dict(self.types)
    for item in others:
        if not isinstance(item, H2OFrame):
            # A numeric constant becomes one column whose name the server will pick.
            combined_names.append(None)
            continue
        if item.nrow != self.nrow:
            raise H2OValueError("Cannot bind a dataframe with %d rows to a data frame with %d rows: "
                                "the number of rows should match" % (item.nrow, self.nrow))
        combined_names.extend(item.columns)
        combined_types.update(item.types)
    fr = H2OFrame._expr(expr=ExprNode("cbind", self, *others), cache=self._ex._cache)
    fr._ex._cache.ncols = len(combined_names)
    distinct = set(combined_names)
    if len(distinct) == len(combined_names) and None not in distinct:
        fr._ex._cache.names = combined_names
        fr._ex._cache.types = combined_types
    else:
        # Duplicate / unknown names present: invalidate the cache and let the server decide.
        fr._ex._cache.names = None
        fr._ex._cache.types = None
    return fr
def rbind(self, data):
    """
    Append data to this frame row-wise.

    :param data: an H2OFrame or a list of H2OFrame's to be combined with current frame row-wise.

    :returns: this H2OFrame with all frames in data appended row-wise.
    """
    assert_is_type(data, H2OFrame, [H2OFrame])
    others = data if isinstance(data, list) else [data]
    numeric_types = [u'float', u'real', u'double', u'int', u'long', u'numeric']
    for other in others:
        if other.ncol != self.ncol:
            raise H2OValueError("Cannot row-bind a dataframe with %d columns to a data frame with %d columns: "
                                "the columns must match" % (other.ncol, self.ncol))
        if other.columns != self.columns:
            raise H2OValueError("Column names must match for rbind() to work")
        if other.types == self.types:  # fast path: identical type maps
            continue
        # Per-column check: types must be identical, or both sides numeric.
        for col in other.types.keys():
            same_type = other.types[col] == self.types[col]
            both_numeric = (other.types[col] in numeric_types) and (self.types[col] in numeric_types)
            if not same_type and not both_numeric:
                raise H2OValueError("Column types must match for rbind() to work. First column type {0}. "
                                    "Second column type {1})".format(self.types[col], other.types[col]))
    fr = H2OFrame._expr(expr=ExprNode("rbind", self, *others), cache=self._ex._cache)
    fr._ex._cache.nrows = self.nrow + sum(other.nrow for other in others)
    return fr
def split_frame(self, ratios=None, destination_frames=None, seed=None):
    """
    Split a frame into distinct subsets of size determined by the given ratios.

    The number of subsets is always 1 more than the number of ratios given. Note that
    this does not give an exact split: H2O uses a probabilistic splitting method rather
    than an exact split, so e.g. a requested 0.75/0.25 split has an *expected* value of
    0.75/0.25. On small datasets the realized sizes deviate more from the expected value
    than on big data, where they will be very close to exact.

    :param List[float] ratios: The fractions of rows for each split.
    :param List[str] destination_frames: The names of the split frames.
    :param int seed: seed for the random number generator

    :returns: A list of H2OFrames
    """
    assert_is_type(ratios, [numeric], None)
    assert_is_type(destination_frames, [str], None)
    assert_is_type(seed, int, None)

    if ratios is None:
        ratios = [0.75]
    if not ratios:
        raise ValueError("Ratios array may not be empty")
    if destination_frames is not None and len(destination_frames) != len(ratios) + 1:
        raise ValueError("The number of provided destination_frames must be one more "
                         "than the number of provided ratios")

    # Turn the ratios into cumulative boundaries strictly inside (0, 1).
    boundaries = []
    cumulative = 0
    for ratio in ratios:
        if ratio < 0:
            raise ValueError("Ratio must be greater than 0")
        cumulative += ratio
        if cumulative >= 1.0:
            raise ValueError("Ratios must add up to less than 1.0")
        boundaries.append(cumulative)

    num_slices = len(ratios) + 1
    tmp_runif = self.runif(seed)
    splits = []
    for i in range(num_slices):
        if i == 0:
            # implicit lower boundary is 0.0
            tmp_slice = self[(tmp_runif <= boundaries[0]), :]
        elif i == num_slices - 1:
            # implicit upper boundary is 1.0
            tmp_slice = self[(tmp_runif > boundaries[-1]), :]
        else:
            tmp_slice = self[((tmp_runif > boundaries[i - 1]) & (tmp_runif <= boundaries[i])), :]
        if destination_frames is not None:
            tmp_slice.frame_id = destination_frames[i]
        splits.append(tmp_slice)

    for split in splits:
        split.refresh()  # Force the split now (otherwise done lazily) to immediately delete tmp_runif
    h2o.remove(tmp_runif)
    del tmp_runif
    return splits
def group_by(self, by):
    """
    Return a new ``GroupBy`` object using this frame and the desired grouping columns.

    The returned groups are sorted by the natural group-by column sort.

    :param by: The columns to group on (either a single column name, or a list of column names, or
        a list of column indices).

    :returns: New ``GroupBy`` object, sorted by the natural group-by column sort.
    """
    assert_is_type(by, str, int, [str, int])
    grouped = GroupBy(self, by)
    return grouped
def sort(self, by, ascending=None):
    """
    Return a new Frame that is sorted by column(s) in ascending order. A fully distributed and parallel sort.
    However, the original frame can contain String columns but sorting cannot be done on String columns.
    Default sorting direction is ascending.

    :param by: The column to sort by (either a single column name, or a list of column names, or
        a list of column indices)
    :param ascending: Boolean array to denote sorting direction for each sorting column. True for ascending
        sort and False for descending sort. If omitted, every column is sorted ascending.

    :return: a new sorted Frame
    """
    assert_is_type(by, str, int, [str, int])
    if type(by) != list: by = [by]
    # The default used to be a mutable `[]` literal (a classic Python pitfall); None is
    # equivalent for callers and avoids sharing one list object across all calls.
    if ascending is None: ascending = []
    if type(ascending) != list: ascending = [ascending]  # convert to list
    ascendingI = [1] * len(by)  # initialize sorting direction to ascending by default
    for c in by:
        if self.type(c) not in ["enum", "time", "int", "real", "string"]:
            raise H2OValueError("Sort by column: " + str(c) + " not of enum, time, int, real, or string type")
    if len(ascending) > 0:  # user specified sort directions; one is required per sorted column
        # (the old comment here said the opposite — this branch handles an *explicit* direction list)
        assert len(ascending) == len(by), "Sorting direction must be specified for each sorted column."
        for index in range(len(by)):
            ascendingI[index] = 1 if ascending[index] else -1
    return H2OFrame._expr(expr=ExprNode("sort", self, by, ascendingI))
def fillna(self, method="forward", axis=0, maxlen=1):
    """
    Return a new Frame that fills NA along a given axis and along a given direction with a maximum fill length.

    :param method: ``"forward"`` or ``"backward"``
    :param axis: 0 for columnar-wise or 1 for row-wise fill
    :param maxlen: Max number of consecutive NA's to fill

    :returns: A new Frame that fills NA along a given axis and along a given direction with a maximum fill length.
    """
    assert_is_type(axis, 0, 1)
    assert_is_type(method, str)
    assert_is_type(maxlen, int)
    filled = H2OFrame._expr(expr=ExprNode("h2o.fillna", self, method, axis, maxlen))
    return filled
def impute(self, column=-1, method="mean", combine_method="interpolate", by=None, group_by_frame=None, values=None):
    """
    Impute missing values into the frame, modifying it in-place.

    :param int column: Index of the column to impute, or -1 to impute the entire frame.
    :param str method: The method of imputation: ``"mean"``, ``"median"``, or ``"mode"``.
    :param str combine_method: When the method is ``"median"``, this setting dictates how to combine quantiles
        for even samples. One of ``"interpolate"``, ``"average"``, ``"low"``, ``"high"``.
    :param by: The list of columns to group on.
    :param H2OFrame group_by_frame: Impute the values with this pre-computed grouped frame.
    :param List values: The list of impute values, one per column. None indicates to skip the column.

    :returns: A list of values used in the imputation or the group-by result used in imputation.

    :raises H2OValueError: if an enum impute value is not among the column's existing levels.
    """
    if is_type(column, str): column = self.names.index(column)
    if is_type(by, str): by = self.names.index(by)

    if values is None:
        values = "_"
    else:
        assert len(values) == len(self.columns), "Length of values does not match length of columns"
        # convert string values to categorical num values
        values2 = []
        for i, value in enumerate(values):
            if self.type(i) == "enum":
                try:
                    values2.append(self.levels()[i].index(value))
                except ValueError:
                    # BUG FIX: this was a bare `except:` which also masked unrelated errors;
                    # only list.index's "not found" ValueError should be translated.
                    raise H2OValueError("Impute value of: " + value + " not found in existing levels of"
                                        " column: " + self.col_names[i])
            else:
                values2.append(value)
        values = values2
    if group_by_frame is None: group_by_frame = "_"

    # This code below is needed to ensure the frame (self) exists on the server. Without it, self._ex._cache.fill()
    # fails with an assertion that ._id is None.
    # This code should be removed / reworked once we have a more consistent strategy of dealing with frames.
    self._ex._eager_frame()

    # BUG FIX: this used `group_by_frame is not "_"`, i.e. string *identity* comparison,
    # which is implementation-dependent and raises SyntaxWarning on modern Pythons.
    if by is not None or group_by_frame != "_":
        res = H2OFrame._expr(
            expr=ExprNode("h2o.impute", self, column, method, combine_method, by, group_by_frame, values))._frame()
    else:
        res = ExprNode("h2o.impute", self, column, method, combine_method, by, group_by_frame,
                       values)._eager_scalar()
    self._ex._cache.flush()
    self._ex._cache.fill(10)
    return res
def merge(self, other, all_x=False, all_y=False, by_x=None, by_y=None, method="auto"):
    """
    Merge two datasets based on common column names. We do not support all_x=True and all_y=True.
    Only one can be True or none is True. The default merge method is auto and it will default to the
    radix method. The radix method will return the correct merge result regardless of duplicated rows
    in the right frame. In addition, the radix method can perform merge even if you have string columns
    in your frames. If there are duplicated rows in your rite frame, they will not be included if you use
    the hash method. The hash method cannot perform merge if you have string columns in your left frame.
    Hence, we consider the radix method superior to the hash method and is the default method to use.

    :param H2OFrame other: The frame to merge to the current one. By default, must have at least one column in common with
        this frame, and all columns in common are used as the merge key.  If you want to use only a subset of the
        columns in common, rename the other columns so the columns are unique in the merged result.
    :param bool all_x: If True, include all rows from the left/self frame
    :param bool all_y: If True, include all rows from the right/other frame
    :param by_x: list of columns in the current frame to use as a merge key.
    :param by_y: list of columns in the ``other`` frame to use as a merge key. Should have the same number of
        columns as in the ``by_x`` list.
    :param method: string representing the merge method, one of auto(default), radix or hash.

    :returns: New H2OFrame with the result of merging the current frame with the ``other`` frame.
    """
    # BUG FIX: common_names was previously computed only when BOTH by_x and by_y were None,
    # so supplying exactly one of them raised a NameError when defaulting the other.
    common_names = None
    if by_x is None or by_y is None:
        common_names = list(set(self.names) & set(other.names))
        if not common_names:
            raise H2OValueError("No columns in common to merge on!")
    if by_x is None:
        by_x = [self.names.index(c) for c in common_names]
    else:
        by_x = _getValidCols(by_x, self)
    if by_y is None:
        by_y = [other.names.index(c) for c in common_names]
    else:
        by_y = _getValidCols(by_y, other)
    return H2OFrame._expr(expr=ExprNode("merge", self, other, all_x, all_y, by_x, by_y, method))
def relevel(self, y):
    """
    Reorder levels of an H2O factor for one single column of a H2O frame.

    The levels of a factor are reordered such that the reference level is at level 0, and all the
    remaining levels are moved down as needed.

    :param str y: The reference level
    :returns: New reordered factor column
    """
    reordered = H2OFrame._expr(expr=ExprNode("relevel", self, quote(y)))
    return reordered
def insert_missing_values(self, fraction=0.1, seed=None):
    """
    Insert missing values into the current frame, modifying it in-place.

    Randomly replaces a user-specified fraction of entries in a H2O dataset with missing
    values.

    :param float fraction: A number between 0 and 1 indicating the fraction of entries to replace with missing.
    :param int seed: The seed for the random number generator used to determine which values to make missing.

    :returns: the original H2OFrame with missing values inserted.
    """
    params = {"dataset": self.frame_id,  # Eager; forces eval now for following REST call
              "fraction": fraction}
    if seed is not None:
        params["seed"] = seed
    job = {"job": h2o.api("POST /3/MissingInserter", data=params)}
    H2OJob(job, job_type="Insert Missing Values").poll()
    self._ex._cache.flush()  # cached stats are now stale
    return self
def min(self):
    """
    Show the minimum value of all frame entries.

    :returns: The minimum value of all frame entries.
    """
    expr = ExprNode("min", self)
    return expr._eager_scalar()
def max(self):
    """
    Show the maximum value of all frame entries.

    :returns: The maximum value of all frame entries.
    """
    expr = ExprNode("max", self)
    return expr._eager_scalar()
def sum(self, skipna=True, axis=0, **kwargs):
    """
    Compute the frame's sum by-column (or by-row).

    :param bool skipna: If True (default), then NAs are ignored during the computation. Otherwise presence
        of NAs renders the entire result NA.
    :param int axis: Direction of sum computation. If 0 (default), then sum is computed columnwise, and the result
        is a frame with 1 row and number of columns as in the original frame. If 1, then sum is computed rowwise
        and the result is a frame with 1 column (called "sum"), and number of rows equal to the number of rows
        in the original frame. For row or column sums, the ``return_frame`` parameter must be True.
    :param bool return_frame: A boolean parameter that indicates whether to return an H2O frame or one single aggregated value. Default is False.

    :returns: either an aggregated value with sum of values per-column (old semantic); or an H2OFrame containing sum of values
        per-column/per-row in the original frame (new semantic). The new semantic is triggered by either
        providing the ``return_frame=True`` parameter, or having the ``general.allow_breaking_changed`` config
        option turned on.
    """
    assert_is_type(skipna, bool)
    assert_is_type(axis, 0, 1)
    # "na_rm" has been deprecated since 2016-10-14 but is still honored.
    if "na_rm" in kwargs:
        warnings.warn("Parameter na_rm is deprecated; use skipna instead", category=H2ODeprecationWarning)
        deprecated_value = kwargs.pop("na_rm")
        assert_is_type(deprecated_value, bool)
        skipna = deprecated_value  # don't assign to skipna directly, to help with error reporting
    # Decide between the frame-returning (new) and scalar-returning (old) semantics.
    return_frame = get_config_value("general.allow_breaking_changes", False)
    if "return_frame" in kwargs:
        return_frame = kwargs.pop("return_frame")
        assert_is_type(return_frame, bool)
    if kwargs:
        raise H2OValueError("Unknown parameters %r" % list(kwargs))
    if not return_frame:
        return ExprNode("sumNA" if skipna else "sum", self)._eager_scalar()
    return H2OFrame._expr(ExprNode("sumaxis", self, skipna, axis))
def mean(self, skipna=True, axis=0, **kwargs):
    """
    Compute the frame's means by-column (or by-row).

    :param bool skipna: If True (default), then NAs are ignored during the computation. Otherwise presence
        of NAs renders the entire result NA.
    :param int axis: Direction of mean computation. If 0 (default), then mean is computed columnwise, and the
        result is a frame with 1 row and number of columns as in the original frame. If 1, then mean is computed
        rowwise and the result is a frame with 1 column (called "mean"), and number of rows equal to the number
        of rows in the original frame.

    :returns: either a list of mean values per-column (old semantic); or an H2OFrame containing mean values
        per-column/per-row from the original frame (new semantic). The new semantic is triggered by either
        providing the ``return_frame=True`` parameter, or having the ``general.allow_breaking_changed`` config
        option turned on.
    """
    assert_is_type(skipna, bool)
    assert_is_type(axis, 0, 1)
    # "na_rm" has been deprecated since 2016-10-14 but is still honored.
    if "na_rm" in kwargs:
        warnings.warn("Parameter na_rm is deprecated; use skipna instead", category=H2ODeprecationWarning)
        deprecated_value = kwargs.pop("na_rm")
        assert_is_type(deprecated_value, bool)
        skipna = deprecated_value  # don't assign to skipna directly, to help with error reporting
    # Decide between the frame-returning (new) and row-returning (old) semantics.
    return_frame = get_config_value("general.allow_breaking_changes", False)
    if "return_frame" in kwargs:
        return_frame = kwargs.pop("return_frame")
        assert_is_type(return_frame, bool)
    if kwargs:
        raise H2OValueError("Unknown parameters %r" % list(kwargs))
    result = H2OFrame._expr(ExprNode("mean", self, skipna, axis))
    return result if return_frame else result.getrow()
def skewness(self, na_rm=False):
    """
    Compute the skewness of each column in the frame.

    :param bool na_rm: If True, then ignore NAs during the computation.
    :returns: A list containing the skewness for each column (NaN for non-numeric columns).
    """
    expr = ExprNode("skewness", self, na_rm)
    return expr._eager_scalar()
def kurtosis(self, na_rm=False):
    """
    Compute the kurtosis of each column in the frame.

    We calculate the common kurtosis, such that kurtosis(normal distribution) is 3.

    :param bool na_rm: If True, then ignore NAs during the computation.
    :returns: A list containing the kurtosis for each column (NaN for non-numeric columns).
    """
    expr = ExprNode("kurtosis", self, na_rm)
    return expr._eager_scalar()
def nacnt(self):
    """
    Count of NAs for each column in this H2OFrame.

    :returns: A list of the na counts (one entry per column).
    """
    expr = ExprNode("naCnt", self)
    return expr._eager_scalar()
def median(self, na_rm=False):
    """
    Compute the median of each column in the frame.

    :param bool na_rm: If True, then ignore NAs during the computation.
    :returns: A list containing the median for each column (NaN for non-numeric columns).
    """
    expr = ExprNode("median", self, na_rm)
    return expr._eager_scalar()
def var(self, y=None, na_rm=False, use=None):
    """
    Compute the variance-covariance matrix of one or two H2OFrames.

    :param H2OFrame y: If this parameter is given, then a covariance matrix between the columns of the target
        frame and the columns of ``y`` is computed. If this parameter is not provided then the covariance matrix
        of the target frame is returned. If target frame has just a single column, then return the scalar variance
        instead of the matrix. Single rows are treated as single columns.
    :param str use: A string indicating how to handle missing values. This could be one of the following:

        - ``"everything"``: outputs NaNs whenever one of its contributing observations is missing
        - ``"all.obs"``: presence of missing observations will throw an error
        - ``"complete.obs"``: discards missing values along with all observations in their rows so that only
          complete observations are used
    :param bool na_rm: an alternative to ``use``: when this is True then the default value for ``use`` is
        ``"complete.obs"``; and if False then the default ``use`` is ``"everything"``. (The previous docstring
        stated this mapping backwards.) This parameter has no effect if ``use`` is given explicitly.

    :returns: An H2OFrame of the covariance matrix of the columns of this frame (if ``y`` is not given),
        or with the columns of ``y`` (if ``y`` is given). However when this frame and ``y`` are both single rows
        or single columns, then the variance is returned as a scalar.
    """
    symmetric = False
    if y is None:
        y = self
        symmetric = True
    # na_rm=True means "drop incomplete observations", i.e. use="complete.obs".
    if use is None: use = "complete.obs" if na_rm else "everything"
    if self.nrow == 1 or (self.ncol == 1 and y.ncol == 1):
        return ExprNode("var", self, y, use, symmetric)._eager_scalar()
    return H2OFrame._expr(expr=ExprNode("var", self, y, use, symmetric))._frame()
def sd(self, na_rm=False):
    """
    Compute the standard deviation for each column in the frame.

    :param bool na_rm: if True, then NAs will be removed from the computation.
    :returns: A list containing the standard deviation for each column (NaN for non-numeric columns).
    """
    expr = ExprNode("sd", self, na_rm)
    return expr._eager_scalar()
def cor(self, y=None, na_rm=False, use=None, method="Pearson"):
    """
    Compute the correlation matrix of one or two H2OFrames.

    :param H2OFrame y: If this parameter is provided, then compute correlation between the columns of ``y``
        and the columns of the current frame. If this parameter is not given, then just compute the correlation
        matrix for the columns of the current frame.
    :param str use: A string indicating how to handle missing values. This could be one of the following:

        - ``"everything"``: outputs NaNs whenever one of its contributing observations is missing
        - ``"all.obs"``: presence of missing observations will throw an error
        - ``"complete.obs"``: discards missing values along with all observations in their rows so that only
          complete observations are used
    :param bool na_rm: an alternative to ``use``: when this is True then the default value for ``use`` is
        ``"complete.obs"``; and if False then the default ``use`` is ``"everything"``. (The previous docstring
        stated this mapping backwards.) This parameter has no effect if ``use`` is given explicitly.
    :param str method: Which method to use - value must be in ["Pearson", "Spearman"]. Defaults to "Pearson".

    :returns: An H2OFrame of the correlation matrix of the columns of this frame (if ``y`` is not given),
        or with the columns of ``y`` (if ``y`` is given). However when this frame and ``y`` are both single rows
        or single columns, then the correlation is returned as a scalar.
    """
    assert_is_type(y, H2OFrame, None)
    assert_is_type(na_rm, bool)
    assert_is_type(use, None, "everything", "all.obs", "complete.obs")
    if y is None:
        y = self
    # na_rm=True means "drop incomplete observations", i.e. use="complete.obs".
    if use is None: use = "complete.obs" if na_rm else "everything"
    if self.nrow == 1 or (self.ncol == 1 and y.ncol == 1):
        return ExprNode("cor", self, y, use, method)._eager_scalar()
    return H2OFrame._expr(expr=ExprNode("cor", self, y, use, method))._frame()
def distance(self, y, measure=None):
    """
    Compute a pairwise distance measure between all rows of two numeric H2OFrames.

    :param H2OFrame y: Frame containing queries (small)
    :param str measure: A string indicating what distance measure to use. Defaults to ``"l2"`` when omitted.
        Must be one of:

        - ``"l1"``:        Absolute distance (L1-norm, >=0)
        - ``"l2"``:        Euclidean distance (L2-norm, >=0)
        - ``"cosine"``:    Cosine similarity (-1...1)
        - ``"cosine_sq"``: Squared Cosine similarity (0...1)

    :returns: An H2OFrame of the matrix containing pairwise distance / similarity between the
        rows of this frame (N x p) and ``y`` (M x p), with dimensions (N x M).

    :examples:

    >>> iris_h2o = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv")
    >>> references = iris_h2o[10:150,0:4]
    >>> queries = iris_h2o[0:10,0:4]
    >>> A = references.distance(queries, "l1")
    >>> B = references.distance(queries, "l2")
    >>> C = references.distance(queries, "cosine")
    >>> D = references.distance(queries, "cosine_sq")
    >>> E = queries.distance(references, "l1")
    >>> (E.transpose() == A).all()
    """
    assert_is_type(y, H2OFrame)
    if measure is None: measure = "l2"
    return H2OFrame._expr(expr=ExprNode("distance", self, y, measure))._frame()
def drop_duplicates(self, columns, keep = "first"):
    """
    Drop duplicated rows, judged by the values in the specified columns.

    :param columns: Columns to compare during the duplicate detection process
        (a list of column names or a list of column indices).
    :param keep: Which of each group of duplicated rows to keep: ``"first"`` (default)
        retains the first occurrence, ``"last"`` retains the last one.

    :returns: A new H2OFrame with the duplicated rows removed.
    """
    assert_is_type(columns, [int], [str])
    assert_is_type(keep, Enum("first", "last"))
    return H2OFrame._expr(expr=ExprNode("dropdup", self, columns, keep))._frame()
def strdistance(self, y, measure=None, compare_empty=True):
    """
    Compute element-wise string distances between two H2OFrames. Both frames need to have the same
    shape and only contain string/factor columns.

    :param H2OFrame y: A comparison frame.
    :param str measure: A string identifier indicating what string distance measure to use. Must be one of:

        - ``"lv"``:        Levenshtein distance
        - ``"lcs"``:       Longest common substring distance
        - ``"qgram"``:     q-gram distance
        - ``"jaccard"``:   Jaccard distance between q-gram profiles
        - ``"jw"``:        Jaro, or Jaro-Winker distance
        - ``"soundex"``:   Distance based on soundex encoding
    :param compare_empty: if set to FALSE, empty strings will be handled as NaNs
    :returns: An H2OFrame of the matrix containing element-wise distance between the
        strings of this frame and ``y``. The returned frame has the same shape as the input frames.
    """
    assert_is_type(y, H2OFrame)
    assert_is_type(measure, Enum('lv', 'lcs', 'qgram', 'jaccard', 'jw', 'soundex'))
    assert_is_type(compare_empty, bool)
    distances = H2OFrame._expr(expr=ExprNode("strDistance", self, y, measure, compare_empty))
    return distances._frame()
def asfactor(self):
    """
    Convert columns in the current frame to categoricals.

    :returns: new H2OFrame with columns of the "enum" type.

    :raises H2OValueError: if any column is not of type bool, int, string or enum.
    :raises H2OTypeError: if the resulting types cannot be determined from the cache.
    """
    allowed = {"bool", "int", "string", "enum"}
    for colname in self.names:
        t = self.types[colname]
        if t not in allowed:
            # The old message claimed only 'int' or 'string' were allowed, although bool
            # and enum columns were (and still are) accepted by the check above.
            raise H2OValueError("Only 'bool', 'int', 'string' or 'enum' are allowed for "
                                "asfactor(), got %s:%s " % (colname, t))
    fr = H2OFrame._expr(expr=ExprNode("as.factor", self), cache=self._ex._cache)
    if fr._ex._cache.types_valid():
        fr._ex._cache.types = {name: "enum" for name in self.types}
    else:
        raise H2OTypeError("Types are not available in result")
    return fr
def isfactor(self):
    """
    Test which columns in the current frame are categorical.

    :returns: a list of True/False indicating for each column in the frame whether it is categorical.
    """
    flags = ExprNode("is.factor", self)._eager_scalar()
    return [bool(flag) for flag in flags]
def anyfactor(self):
    """
    Determine if there are any categorical columns in the frame.

    :returns: (bool) True if there are any categorical columns in the frame.
    """
    answer = ExprNode("any.factor", self)._eager_scalar()
    return bool(answer)
def categories(self):
    """
    Make a list of levels for an enum (categorical) column. This function can only be applied to
    a single-column categorical frame.

    :returns: The list of levels for an enum column.
    """
    if self.ncols != 1:
        raise H2OValueError("This operation only applies to a single factor column")
    only_column = self.names[0]
    if self.types[only_column] != "enum":
        raise H2OValueError("Input is not a factor. This operation only applies to a single factor column")
    return self.levels()[0]
def transpose(self):
"""
Transpose rows and columns of this frame.
:returns: new H2OFrame where with rows/columns from the original frame transposed.
:examples:
>>> from random import randrange
>>> import numpy as np
>>> row_num = randrange(1,10)
>>> col_num = randrange(1,10)
>>> python_lists = np.random.randint(-5,5, (row_num, col_num))
>>> h2oframe = h2o.H2OFrame(python_obj=python_lists)
>>> h2oframe.transpose()
"""
return H2OFrame._expr(expr=ExprNode("t", self))
def strsplit(self, pattern):
"""
Split the strings in the target column on the given regular expression pattern.
:param str pattern: The split pattern.
:returns: H2OFrame containing columns of the split strings.
:examples:
>>> frame = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris.csv")
>>> frame["C5"].strsplit("-")
"""
fr = H2OFrame._expr(expr=ExprNode("strsplit", self, pattern))
fr._ex._cache.nrows = self.nrow
return fr
def tokenize(self, split):
"""
Tokenize String
tokenize() is similar to strsplit(), the difference between them is that tokenize() will store the tokenized
text into a single column making it easier for additional processing (filtering stop words, word2vec algo, ...).
:param tokenize split: The regular expression to tokenize on.
:returns: An H2OFrame with a single column representing the tokenized Strings. Original rows of the input DF are separated by NA.
:examples:
>>> df1 = h2o.H2OFrame.from_python({'String':
... [' this is a string ']})
>>> df1 = df1.ascharacter()
>>> df2 = h2o.H2OFrame.from_python({'String':
... ['this is another string']})
>>> df2 = df2.ascharacter()
>>> df3 = h2o.H2OFrame.from_python({'String':
... ['this is a longer string']})
>>> df3 = df3.ascharacter()
>>> df4 = h2o.H2OFrame.from_python({'String':
... ['this is tall, this is taller']})
>>> df4 = df4.ascharacter()
>>> combined = df1.rbind([df2, df3, df4])
>>> combined
>>> tokenized = combined.tokenize(" ")
>>> tokenized.describe
"""
fr = H2OFrame._expr(expr=ExprNode("tokenize", self, split))
return fr
def countmatches(self, pattern):
"""
For each string in the frame, count the occurrences of the provided pattern. If countmatches is applied to
a frame, all columns of the frame must be type string, otherwise, the returned frame will contain errors.
The pattern here is a plain string, not a regular expression. We will search for the occurrences of the
pattern as a substring in element of the frame. This function is applicable to frames containing only
string or categorical columns.
:param str pattern: The pattern to count matches on in each string. This can also be a list of strings,
in which case all of them will be searched for.
:returns: numeric H2OFrame with the same shape as the original, containing counts of matches of the
pattern for each cell in the original frame.
:examples:
>>> iris = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv")
>>> result = iris["class"].countmatches("o")
>>> result2 = iris["class"].countmatches("s")
>>> result
>>> result2
"""
assert_is_type(pattern, str, [str])
fr = H2OFrame._expr(expr=ExprNode("countmatches", self, pattern))
fr._ex._cache.nrows = self.nrow
fr._ex._cache.ncols = self.ncol
return fr
def trim(self):
"""
Trim white space on the left and right of strings in a single-column H2OFrame.
:returns: H2OFrame with trimmed strings.
:examples:
>>> frame = h2o.import_file(("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_trim.csv"),
... col_types=["string","numeric",
... "numeric","numeric",
... "numeric","numeric",
... "numeric","numeric"])
>>> frame["name"].trim()
"""
fr = H2OFrame._expr(expr=ExprNode("trim", self))
fr._ex._cache.nrows = self.nrow
fr._ex._cache.ncol = self.ncol
return fr
def substring(self, start_index, end_index=None):
"""
For each string, return a new string that is a substring of the original string.
If end_index is not specified, then the substring extends to the end of the original string. If the start_index
is longer than the length of the string, or is greater than or equal to the end_index, an empty string is
returned. Negative start_index is coerced to 0.
:param int start_index: The index of the original string at which to start the substring, inclusive.
:param int end_index: The index of the original string at which to end the substring, exclusive.
:returns: An H2OFrame containing the specified substrings.
:examples:
>>> frame = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris.csv")
>>> frame["C5"].substring(0,5)
"""
fr = H2OFrame._expr(expr=ExprNode("substring", self, start_index, end_index))
fr._ex._cache.nrows = self.nrow
fr._ex._cache.ncol = self.ncol
return fr
def lstrip(self, set=" "):
"""
Return a copy of the column with leading characters removed.
The set argument is a string specifying the set of characters to be removed.
If omitted, the set argument defaults to removing whitespace.
:param character set: The set of characters to lstrip from strings in column.
:returns: a new H2OFrame with the same shape as the original frame and having all its values
trimmed from the left (equivalent of Python's ``str.lstrip()``).
:examples:
>>> iris = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris.csv")
>>> iris["C5"].lstrip("Iris-")
"""
# work w/ None; parity with python lstrip
if set is None: set = " "
fr = H2OFrame._expr(expr=ExprNode("lstrip", self, set))
fr._ex._cache.nrows = self.nrow
fr._ex._cache.ncol = self.ncol
return fr
def rstrip(self, set=" "):
"""
Return a copy of the column with trailing characters removed.
The set argument is a string specifying the set of characters to be removed.
If omitted, the set argument defaults to removing whitespace.
:param character set: The set of characters to rstrip from strings in column
:returns: a new H2OFrame with the same shape as the original frame and having all its values
trimmed from the right (equivalent of Python's ``str.rstrip()``).
:examples:
>>> iris = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris.csv")
>>> iris.levels()
>>> iris["C5"] = iris["C5"].rstrip("color")
>>> iris["C5"].levels()[0]
"""
# work w/ None; parity with python rstrip
if set is None: set = " "
fr = H2OFrame._expr(expr=ExprNode("rstrip", self, set))
fr._ex._cache.nrows = self.nrow
fr._ex._cache.ncol = self.ncol
return fr
def entropy(self):
"""
For each string compute its Shannon entropy, if the string is empty the entropy is 0.
:returns: an H2OFrame of Shannon entropies.
:examples:
>>> frame = h2o.H2OFrame.from_python(["redrum"])
>>> frame.entropy()
"""
fr = H2OFrame._expr(expr=ExprNode("entropy", self))
fr._ex._cache.nrows = self.nrow
fr._ex._cache.ncol = self.ncol
return fr
def num_valid_substrings(self, path_to_words):
"""
For each string, find the count of all possible substrings with 2 characters or more that are contained in
the line-separated text file whose path is given.
:param str path_to_words: Path to file that contains a line-separated list of strings considered valid.
:returns: An H2OFrame with the number of substrings that are contained in the given word list.
:examples:
>>> path = "https://raw.githubusercontent.com/dwyl/english-words/master/words.txt"
# test empty strings
>>> string = h2o.H2OFrame.from_python([''],
... column_types=['string'])
>>> enum = h2o.H2OFrame.from_python([''],
... column_types=['enum'])
>>> string.num_valid_substrings(path)[0,0] == 0
>>> enum.num_valid_substrings(path)[0,0] == 0
"""
assert_is_type(path_to_words, str)
fr = H2OFrame._expr(expr=ExprNode("num_valid_substrings", self, path_to_words))
fr._ex._cache.nrows = self.nrow
fr._ex._cache.ncol = self.ncol
return fr
def nchar(self):
"""
Count the length of each string in a single-column H2OFrame of string type.
:returns: A single-column H2OFrame containing the per-row character count.
:examples:
>>> iris = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader_NA_2.csv")
>>> iris[4].nchar()
"""
return H2OFrame._expr(expr=ExprNode("strlen", self))
def table(self, data2=None, dense=True):
"""
Compute the counts of values appearing in a column, or co-occurence counts between two columns.
:param H2OFrame data2: An optional single column to aggregate counts by.
:param bool dense: If True (default) then use dense representation, which lists only non-zero counts,
1 combination per row. Set to False to expand counts across all combinations.
:returns: H2OFrame of the counts at each combination of factor levels
:examples:
>>> df = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/prostate/prostate_cat.csv")
>>> df[['DPROS', 'RACE']].table(data2=None,dense=True)
"""
return H2OFrame._expr(expr=ExprNode("table", self, data2, dense)) if data2 is not None else H2OFrame._expr(
expr=ExprNode("table", self, dense))
    def hist(self, breaks="sturges", plot=True, **kwargs):
        """
        Compute a histogram over a numeric column.

        :param breaks: Can be one of ``"sturges"``, ``"rice"``, ``"sqrt"``, ``"doane"``, ``"fd"``, ``"scott"``;
            or a single number for the number of breaks; or a list containing the split points, e.g:
            ``[-50, 213.2123, 9324834]``. If breaks is "fd", the MAD is used over the IQR in computing bin width.
        :param bool plot: If True (default), then a plot will be generated using ``matplotlib``.

        :returns: If ``plot`` is False, return H2OFrame with these columns: breaks, counts, mids_true,
            mids, and density; otherwise this method draws a plot and returns nothing.

        :examples:

        >>> iris = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv")
        >>> iris.describe()
        >>> iris[0].hist(breaks=5,plot=False)
        """
        # "server" is an internal-only kwarg: use the non-interactive Agg backend and skip plt.show().
        server = kwargs.pop("server") if "server" in kwargs else False
        assert_is_type(breaks, int, [numeric], Enum("sturges", "rice", "sqrt", "doane", "fd", "scott"))
        assert_is_type(plot, bool)
        assert_is_type(server, bool)
        if kwargs:
            raise H2OValueError("Unknown parameters to hist(): %r" % kwargs)
        # Histogram breaks/counts are computed server-side; _frame() materializes the result.
        hist = H2OFrame._expr(expr=ExprNode("hist", self, breaks))._frame()
        if plot:
            try:
                import matplotlib
                if server:
                    matplotlib.use("Agg")
                import matplotlib.pyplot as plt
            except ImportError:
                print("ERROR: matplotlib is required to make the histogram plot. "
                      "Set `plot` to False, if a plot is not desired.")
                return
            # Bar widths are the consecutive differences of the break points.
            hist["widths"] = hist["breaks"].difflag1()
            # [2:] because we're removing the title and the first row (which consists of NaNs)
            lefts = [float(c[0]) for c in h2o.as_list(hist["breaks"], use_pandas=False)[2:]]
            widths = [float(c[0]) for c in h2o.as_list(hist["widths"], use_pandas=False)[2:]]
            counts = [float(c[0]) for c in h2o.as_list(hist["counts"], use_pandas=False)[2:]]
            plt.xlabel(self.names[0])
            plt.ylabel("Frequency")
            plt.title("Histogram of %s" % self.names[0])
            # matplotlib deprecated "left" arg in 2.1.0 and removed in 3.0.0
            version_number = matplotlib.__version__
            major = version_number.split('.')[0]
            minor = version_number.split('.')[1]
            major = int(major)
            minor = int(minor)
            if major == 2 and minor >= 1 or major >= 3:
                plt.bar(x=lefts, width=widths, height=counts, bottom=0)
            else:
                plt.bar(left=lefts, height=counts, width=widths, bottom=0)
            if not server:
                plt.show()
        else:
            # Not plotting: add a normalized "density" column so the numeric result is self-contained.
            hist["density"] = hist["counts"] / (hist["breaks"].difflag1() * hist["counts"].sum())
        return hist
def isax(self, num_words, max_cardinality, optimize_card=False, **kwargs):
"""
Compute the iSAX index for DataFrame which is assumed to be numeric time series data.
References:
- http://www.cs.ucr.edu/~eamonn/SAX.pdf
- http://www.cs.ucr.edu/~eamonn/iSAX_2.0.pdf
:param int num_words: Number of iSAX words for the timeseries, i.e. granularity along the time series
:param int max_cardinality: Maximum cardinality of the iSAX word. Each word can have less than the max
:param bool optimized_card: An optimization flag that will find the max cardinality regardless of what is
passed in for ``max_cardinality``.
:returns: An H2OFrame with the name of time series, string representation of iSAX word, followed by
binary representation.
:examples:
>>> df = h2o.create_frame(rows=1,
... cols=256,
... real_fraction=1.0,
... missing_fraction=0.0,
... seed=123)
>>> df2 = df.cumsum(axis=1)
>>> res = df2.isax(num_words=10,max_cardinality=10)
>>> res
"""
if num_words <= 0: raise H2OValueError("num_words must be greater than 0")
if max_cardinality <= 0: raise H2OValueError("max_cardinality must be greater than 0")
return H2OFrame._expr(expr=ExprNode("isax", self, num_words, max_cardinality, optimize_card))
def convert_H2OFrame_2_DMatrix(self, predictors, yresp, h2oXGBoostModel):
'''
This method requires that you import the following toolboxes: xgboost, pandas, numpy and scipy.sparse.
This method will convert an H2OFrame to a DMatrix that can be used by native XGBoost. The H2OFrame contains
numerical and enum columns alone. Note that H2O one-hot-encoding introduces a missing(NA)
column. There can be NAs in any columns.
Follow the steps below to compare H2OXGBoost and native XGBoost:
1. Train the H2OXGBoost model with H2OFrame trainFile and generate a prediction:
h2oModelD = H2OXGBoostEstimator(**h2oParamsD) # parameters specified as a dict()
h2oModelD.train(x=myX, y=y, training_frame=trainFile) # train with H2OFrame trainFile
h2oPredict = h2oPredictD = h2oModelD.predict(trainFile)
2. Derive the DMatrix from H2OFrame:
nativeDMatrix = trainFile.convert_H2OFrame_2_DMatrix(myX, y, h2oModelD)
3. Derive the parameters for native XGBoost:
nativeParams = h2oModelD.convert_H2OXGBoostParams_2_XGBoostParams()
4. Train your native XGBoost model and generate a prediction:
nativeModel = xgb.train(params=nativeParams[0], dtrain=nativeDMatrix, num_boost_round=nativeParams[1])
nativePredict = nativeModel.predict(data=nativeDMatrix, ntree_limit=nativeParams[1].
5. Compare the predictions h2oPredict from H2OXGBoost, nativePredict from native
XGBoost.
:param h2oFrame: H2OFrame to be converted to DMatrix for native XGBoost
:param predictors: List of predictor columns, can be column names or indices
:param yresp: response column, can be column index or name
:param h2oXGBoostModel: H2OXGboost model that are built with the same H2OFrame as input earlier
:return: DMatrix that can be an input to a native XGBoost model
:examples:
>>> import xgboost as xgb
>>> from h2o.estimators.xgboost import *
>>> data = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/jira/adult_data_modified.csv")
>>> data[14] = data[14].asfactor()
>>> myX = list(range(0, 13))
>>> y='income'
>>> h2oParamsD = {"ntrees":30, "max_depth":4, "seed":2,
... "learn_rate":0.7,"col_sample_rate_per_tree" : 0.9,
... "min_rows" : 5, "score_tree_interval": 30+1,
... "tree_method": "exact", "backend":"cpu"}
>>> h2oModelD = H2OXGBoostEstimator(**h2oParamsD)
>>> h2oModelD.train(x=myX, y=y, training_frame=data)
>>> h2oPredictD = h2oModelD.predict(data)
>>> nativeXGBoostParam = h2oModelD.convert_H2OXGBoostParams_2_XGBoostParams()
>>> nativeXGBoostInput = data.convert_H2OFrame_2_DMatrix(myX,
... y,
... h2oModelD)
>>> nativeModel = xgb.train(params=nativeXGBoostParam[0],
... dtrain=nativeXGBoostInput,
... num_boost_round=nativeXGBoostParam[1])
>>> nativePred = nativeModel.predict(data=nativeXGBoostInput,
... ntree_limit=nativeXGBoostParam[1])
'''
import xgboost as xgb
import pandas as pd
import numpy as np
from scipy.sparse import csr_matrix
assert isinstance(predictors, list) or isinstance(predictors, tuple)
assert h2oXGBoostModel._model_json['algo'] == 'xgboost', \
"convert_H2OFrame_2_DMatrix is used for H2OXGBoost model only."
tempFrame = self[predictors].cbind(self[yresp])
colnames = tempFrame.names
if type(predictors[0])==type(1): # convert integer indices to column names
temp = []
for colInd in predictors:
temp.append(colnames[colInd])
predictors = temp
if (type(yresp) == type(1)):
tempy = colnames[yresp]
yresp = tempy # column name of response column
enumCols = [] # extract enum columns out to process them
enumColsIndices = [] # store enum column indices
typeDict = self.types
for predName in predictors:
if str(typeDict[predName])=='enum':
enumCols.append(predName)
enumColsIndices.append(colnames.index(predName))
pandaFtrain = tempFrame.as_data_frame(use_pandas=True, header=True)
nrows = tempFrame.nrow
# convert H2OFrame to DMatrix starts here
if len(enumCols) > 0: # enumCols contain all enum column names
allDomain = tempFrame.levels() # list all domain levels with column indices
domainLen = []
for enumIndex in enumColsIndices:
if len(allDomain[enumIndex])>0:
domainLen.append(len(allDomain[enumIndex])*-1)
incLevel = np.argsort(domainLen) # indices of enum column indices with decreasing domain length
# need to move enum columns to the front, highest level first
c2 = tempFrame[enumCols[incLevel[0]]]
tempFrame = tempFrame.drop(enumCols[incLevel[0]])
for index in range(1, len(incLevel)):
c2 = c2.cbind(tempFrame[enumCols[incLevel[index]]])
tempFrame = tempFrame.drop(enumCols[incLevel[index]])
enumCols = c2.names
tempFrame = c2.cbind(tempFrame)
pandaFtrain = tempFrame.as_data_frame(use_pandas=True, header=True) # redo translation from H2O to panda
pandaTrainPart = generatePandaEnumCols(pandaFtrain, enumCols[0], nrows, tempFrame[enumCols[0]].categories())
pandaFtrain.drop([enumCols[0]], axis=1, inplace=True)
for colInd in range(1, len(enumCols)):
cname=enumCols[colInd]
ctemp = generatePandaEnumCols(pandaFtrain, cname, nrows, tempFrame[enumCols[colInd]].categories())
pandaTrainPart=pd.concat([pandaTrainPart, ctemp], axis=1)
pandaFtrain.drop([cname], axis=1, inplace=True)
pandaFtrain = pd.concat([pandaTrainPart, pandaFtrain], axis=1)
c0= tempFrame[yresp].asnumeric().as_data_frame(use_pandas=True, header=True)
pandaFtrain.drop([yresp], axis=1, inplace=True)
pandaF = pd.concat([c0, pandaFtrain], axis=1)
pandaF.rename(columns={c0.columns[0]:yresp}, inplace=True)
newX = list(pandaFtrain.columns.values)
data = pandaF.as_matrix(newX)
label = pandaF.as_matrix([yresp])
return xgb.DMatrix(data=csr_matrix(data), label=label) \
if h2oXGBoostModel._model_json['output']['sparse'] else xgb.DMatrix(data=data, label=label)
def pivot(self, index, column, value):
"""
Pivot the frame designated by the three columns: index, column, and value. Index and column should be
of type enum, int, or time.
For cases of multiple indexes for a column label, the aggregation method is to pick the first occurrence in the data frame.
:param index: Index is a column that will be the row label
:param column: The labels for the columns in the pivoted Frame
:param value: The column of values for the given index and column label
:returns: Returns a new H2OFrame with pivoted columns.
:examples:
>>> df = h2o.create_frame(rows=1000000,
... cols=3,
... factors=10,
... categorical_fraction=1.0/3,
... time_fraction=1.0/3,
... real_fraction=1.0/3,
... real_range=100,
... missing_fraction=0.0,
... seed=1234)
>>> pdf = df.as_data_frame()
>>> ppdf = pdf.pivot(values="C3",index="C1",columns="C2")
>>> ppdf = ppdf.fillna(0.0)
>>> ppdfh2o = h2o.H2OFrame(ppdf)
>>> ppdfh2o
"""
assert_is_type(index, str)
assert_is_type(column, str)
assert_is_type(value, str)
col_names = self.names
if index not in col_names:
raise H2OValueError("Index not in H2OFrame")
if column not in col_names:
raise H2OValueError("Column not in H2OFrame")
if value not in col_names:
raise H2OValueError("Value column not in H2OFrame")
if self.type(column) not in ["enum","time","int"]:
raise H2OValueError("'column' argument is not type enum, time or int")
if self.type(index) not in ["enum","time","int"]:
raise H2OValueError("'index' argument is not type enum, time or int")
return H2OFrame._expr(expr=ExprNode("pivot",self,index,column,value))
def melt(self, id_vars, value_vars=None, var_name="variable", value_name="value", skipna=False):
"""
Converts an H2OFrame to key-value representation while optionally skipping NA values.
Inverse operation to pivot.
:param id_vars: Columns used as identifiers.
:param value_vars: What columns will be converted to key-value pairs (default: complement to id_vars).
:param var_name: Name of the key-column (default: "variable").
:param value_name: Name of the value-column (default: "value").
:param skipna: If enabled, do not include NAs in the result.
:returns: Returns an unpivoted H2OFrame.
:examples:
>>> import pandas as pd
>>> from h2o.frame import H2OFrame
>>> df = pd.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},
... 'B': {0: 1, 2: 5},
... 'C': {0: 2, 1: 4, 2: 6}})
>>> df
>>> frozen_h2o = H2OFrame(df)
>>> frozen_h2o
>>> melted = frozen_h2o.melt(id_vars=["A"], value_vars=["B"])
>>> melted
"""
assert_is_type(id_vars, [str])
assert_is_type(value_vars, [str], None)
assert_is_type(var_name, str)
assert_is_type(value_name, str)
assert_is_type(skipna, bool)
return H2OFrame._expr(expr=ExprNode("melt", self, id_vars, value_vars, var_name, value_name, skipna))
def rank_within_group_by(self, group_by_cols, sort_cols, ascending=[], new_col_name="New_Rank_column", sort_cols_sorted=False):
"""
This function will add a new column rank where the ranking is produced as follows:
1. Sorts the H2OFrame by columns sorted in by columns specified in group_by_cols and sort_cols in the directions
specified by the ascending for the sort_cols. The sort directions for the group_by_cols are ascending only.
2. A new rank column is added to the frame which will contain a rank assignment performed next. The user can
choose to assign a name to this new column. The default name is New_Rank_column.
3. For each groupby groups, a rank is assigned to the row starting from 1, 2, ... to the end of that
group.
4. If sort_cols_sorted is TRUE, a final sort on the frame will be performed frame according to the sort_cols and
the sort directions in ascending. If sort_cols_sorted is FALSE (by default), the frame from step 3 will be
returned as is with no extra sort. This may provide a small speedup if desired.
:param group_by_cols: The columns to group on (either a single column name/index, or a list of column names
or column indices
:param sort_cols: The columns to sort on (either a single column name/index, or a list of column names or
column indices
:param ascending: Optional Boolean array to denote sorting direction for each sorting column. True for
ascending, False for descending. Default is ascending sort. Sort direction for enums will be ignored.
:param new_col_name: Optional String to denote the new column names. Default to New_Rank_column.
:param sort_cols_sorted: Optional Boolean to denote if the returned frame should be sorted according to sort_cols
and sort directions specified in ascending. Default is False.
:returns: A new Frame with new rank (sorted by columns in sort_cols) column within the grouping
specified by the group_by_cols.
:examples:
>>> air = h2o.import_file("https://s3.amazonaws.com/h2o-airlines-unpacked/allyears2k.csv")
# slice out all but the following five columns
>>> df = air[:, ["ArrDelay", "DepDelay", "Origin", "Dest", "Distance"]]
# group by "Distance" and sort by "Origin"
>>> ranked1 = df.rank_within_group_by(group_by_cols="Distance", sort_cols="Origin")
# group by "ArrDelay" and sort by "Origin"
>>> ranked2 = df.rank_within_group_by(group_by_cols="ArrDelay", sort_cols="Origin")
# group by "DepDelay" and sort by "Dest"
>>> ranked3 = df.rank_within_group_by(group_by_cols="DepDelay", sort_cols="Dest")
"""
assert_is_type(group_by_cols, str, int, [str, int])
if type(group_by_cols) != list: group_by_cols = [group_by_cols]
if type(sort_cols) != list: sort_cols = [sort_cols]
if type(ascending) != list: ascending = [ascending] # convert to list
ascendingI=[1]*len(sort_cols) # intitalize sorting direction to ascending by default
for c in sort_cols:
if self.type(c) not in ["enum","time","int","real"]:
raise H2OValueError("Sort by column: " + str(c) + " not of enum, time, int or real type")
for c in group_by_cols:
if self.type(c) not in ["enum","time","int","real"]:
raise H2OValueError("Group by column: " + str(c) + " not of enum, time, int or real type")
if len(ascending)>0: # user specify sort direction, assume all columns ascending
assert len(ascending)==len(sort_cols), "Sorting direction must be specified for each sorted column."
for index in range(len(sort_cols)):
ascendingI[index]=1 if ascending[index] else -1
finalSortedOrder=0
if (sort_cols_sorted):
finalSortedOrder=1
return H2OFrame._expr(expr=ExprNode("rank_within_groupby",self,group_by_cols,sort_cols,ascendingI,new_col_name, finalSortedOrder))
def topNBottomN(self, column=0, nPercent=10, grabTopN=-1):
"""
Given a column name or one column index, a percent N, this function will return the top or bottom N% of the
values of the column of a frame. The column must be a numerical column.
:param column: a string for column name or an integer index
:param nPercent: a top or bottom percentage of the column values to return
:param grabTopN: -1 to grab bottom N percent and 1 to grab top N percent
:returns: a H2OFrame containing two columns. The first column contains the original row indices where
the top/bottom values are extracted from. The second column contains the values.
:examples:
>>> import numpy as np
>>> from random import randint
>>> dataFrame = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/bigdata/laptop/jira/TopBottomNRep4.csv.zip")
>>> topAnswer = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/jira/Top20Per.csv.zip")
>>> bottomAnswer = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/jira/Bottom20Per.csv.zip")
>>> nPercentages = [1,2,3,4]
>>> frameNames = dataFrame.names
>>> tolerance=1e-12
>>> nsample=100
>>> nP = nPercentages[randint(0, len(nPercentages)-1)]
>>> colIndex = randint(0, len(frameNames)-2)
>>> dataFrame.topNBottomN(frameNames[colIndex], nP, grabTopN=1)
>>> dataFrame.topNBottomN(frameNames[colIndex], nP, grabTopN=-1)
"""
assert (nPercent >= 0) and (nPercent<=100.0), "nPercent must be between 0.0 and 100.0"
assert round(nPercent*0.01*self.nrows)>0, "Increase nPercent. Current value will result in top 0 row."
if isinstance(column, int):
if (column < 0) or (column>=self.ncols):
raise H2OValueError("Invalid column index H2OFrame")
else:
colIndex = column
else: # column is a column name
col_names = self.names
if column not in col_names:
raise H2OValueError("Column name not found H2OFrame")
else:
colIndex = col_names.index(column)
if not(self[colIndex].isnumeric()):
raise H2OValueError("Wrong column type! Selected column must be numeric.")
return H2OFrame._expr(expr=ExprNode("topn", self, colIndex, nPercent, grabTopN))
def topN(self, column=0, nPercent=10):
"""
Given a column name or one column index, a percent N, this function will return the top N% of the values
of the column of a frame. The column must be a numerical column.
:param column: a string for column name or an integer index
:param nPercent: a top percentage of the column values to return
:returns: a H2OFrame containing two columns. The first column contains the original row indices where
the top values are extracted from. The second column contains the top nPercent values.
:examples:
>>> import numpy as np
>>> from random import randint
>>> dataFrame = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/bigdata/laptop/jira/TopBottomNRep4.csv.zip")
>>> topAnswer = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/jira/Top20Per.csv.zip")
>>> nPercentages = [1,2,3,4]
>>> frameNames = dataFrame.names
>>> tolerance=1e-12
>>> nsample=100
>>> nP = nPercentages[randint(0, len(nPercentages)-1)]
>>> colIndex = randint(0, len(frameNames)-2)
>>> dataFrame.topN(frameNames[colIndex], nP)
"""
return self.topNBottomN(column, nPercent, 1)
def bottomN(self, column=0, nPercent=10):
"""
Given a column name or one column index, a percent N, this function will return the bottom N% of the values
of the column of a frame. The column must be a numerical column.
:param column: a string for column name or an integer index
:param nPercent: a bottom percentage of the column values to return
:returns: a H2OFrame containing two columns. The first column contains the original row indices where
the bottom values are extracted from. The second column contains the bottom nPercent values.
:examples:
>>> import numpy as np
>>> from random import randint
>>> dataFrame = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/bigdata/laptop/jira/TopBottomNRep4.csv.zip")
>>> bottomAnswer = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/jira/Bottom20Per.csv.zip")
>>> nPercentages = [1,2,3,4]
>>> frameNames = dataFrame.names
>>> tolerance=1e-12
>>> nsample=100
>>> nP = nPercentages[randint(0, len(nPercentages)-1)]
>>> colIndex = randint(0, len(frameNames)-2)
>>> dataFrame.bottomN(frameNames[colIndex], nP)
"""
return self.topNBottomN(column, nPercent, -1)
def sub(self, pattern, replacement, ignore_case=False):
"""
Substitute the first occurrence of pattern in a string with replacement.
:param str pattern: A regular expression.
:param str replacement: A replacement string.
:param bool ignore_case: If True then pattern will match case-insensitively.
:returns: an H2OFrame with all values matching ``pattern`` replaced with ``replacement``.
:examples:
>>> frame = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris.csv")
>>> frame["C5"].sub('s', 'z', ignore_case=False)
"""
return H2OFrame._expr(expr=ExprNode("replacefirst", self, pattern, replacement, ignore_case))
def gsub(self, pattern, replacement, ignore_case=False):
"""
Globally substitute occurrences of pattern in a string with replacement.
:param str pattern: A regular expression.
:param str replacement: A replacement string.
:param bool ignore_case: If True then pattern will match case-insensitively.
:returns: an H2OFrame with all occurrences of ``pattern`` in all values replaced with ``replacement``.
:examples:
>>> iris = h2o.import_file(("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris.csv"),
... col_types=["numeric","numeric",
... "numeric","numeric",
... "string"])
>>> iris["C5"].gsub("s","z",ignore_case=False)
"""
return H2OFrame._expr(expr=ExprNode("replaceall", self, pattern, replacement, ignore_case))
def interaction(self, factors, pairwise, max_factors, min_occurrence, destination_frame=None):
"""
Categorical Interaction Feature Creation in H2O.
Creates a frame in H2O with n-th order interaction features between categorical columns, as specified by
the user.
:param factors: list of factor columns (either indices or column names).
:param bool pairwise: Whether to create pairwise interactions between factors (otherwise create one
higher-order interaction). Only applicable if there are 3 or more factors.
:param int max_factors: Max. number of factor levels in pair-wise interaction terms (if enforced, one extra
catch-all factor will be made).
:param int min_occurrence: Min. occurrence threshold for factor levels in pair-wise interaction terms.
:param str destination_frame: (internal) string indicating the key for the frame created.
:returns: an H2OFrame
:examples:
>>> iris = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv")
>>> iris = iris.cbind(iris[4] == "Iris-setosa")
>>> iris[5] = iris[5].asfactor()
>>> iris.set_name(5,"C6")
>>> iris = iris.cbind(iris[4] == "Iris-virginica")
>>> iris[6] = iris[6].asfactor()
>>> iris.set_name(6, name="C7")
>>> two_way_interactions = h2o.interaction(iris,
... factors=[4,5,6],
... pairwise=True,
... max_factors=10000,
... min_occurrence=1)
>>> two_way_interactions
"""
return h2o.interaction(data=self, factors=factors, pairwise=pairwise, max_factors=max_factors,
min_occurrence=min_occurrence, destination_frame=destination_frame)
def toupper(self):
"""
Translate characters from lower to upper case for a particular column.
:returns: new H2OFrame with all strings in the current frame converted to the uppercase.
:examples:
>>> frame = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris.csv")
>>> frame["C5"]
>>> frame["C5"].toupper()
"""
return H2OFrame._expr(expr=ExprNode("toupper", self), cache=self._ex._cache)
def grep(self,pattern, ignore_case = False, invert = False, output_logical = False):
    """
    Search each element of a string column for matches of ``pattern``.

    By default the indices of the matching elements are returned; with
    ``output_logical`` a 0/1 indicator column is returned instead.

    :param str pattern: a character string containing a regular expression.
    :param bool ignore_case: ignore character case while matching.
    :param bool invert: identify the elements that do NOT match the pattern.
    :param bool output_logical: return a logical (0/1) vector of indicators
        instead of the list of matching positions.
    :return: H2OFrame of matching positions, or of 0/1 indicators.
    """
    grep_expr = ExprNode("grep", self, pattern, ignore_case, invert, output_logical)
    return H2OFrame._expr(expr=grep_expr)
def tolower(self):
    """
    Lower-case every string in the frame.

    :returns: new H2OFrame with all strings converted to lower case.
    """
    lower_expr = ExprNode("tolower", self)
    return H2OFrame._expr(expr=lower_expr, cache=self._ex._cache)
def rep_len(self, length_out):
    """
    Replicate the current frame up to the requested length.

    A single-column source is repeated row-wise into a ``length_out x 1`` frame;
    a multi-column source is repeated column-wise into ``nrows x length_out``.
    If ``length_out`` is smaller than the matching source dimension, the result
    is a truncated version of the original.

    :param int length_out: number of rows (or columns) of the resulting H2OFrame.
    :returns: new H2OFrame with the repeated data.
    """
    rep_expr = ExprNode("rep_len", self, length_out)
    return H2OFrame._expr(expr=rep_expr)
def scale(self, center=True, scale=True):
    """
    Center and/or scale the columns of the current frame.

    :param center: True to demean each column; a list of numbers shifts each
        column by the corresponding amount; False applies no shifting.
    :param scale: True to divide each column by its standard deviation; a list
        of numbers scales each column by the given amount; False applies no scaling.
    :returns: an H2OFrame with the scaled values.
    """
    scale_expr = ExprNode("scale", self, center, scale)
    return H2OFrame._expr(expr=scale_expr, cache=self._ex._cache)
def signif(self, digits=6):
    """
    Round floating-point values to the given number of significant digits.

    :param int digits: number of significant digits to retain.
    :returns: new H2OFrame with the rounded values.
    """
    signif_expr = ExprNode("signif", self, digits)
    return H2OFrame._expr(expr=signif_expr, cache=self._ex._cache)
def round(self, digits=0):
    """
    Round floating-point values to the given number of decimal places.

    Uses "round half to even" (IEC 60559), so ``round(2.5) == 2`` and
    ``round(3.5) == 4``. Rounding to a negative number of decimal places
    is not supported.

    :param int digits: number of decimal places to retain.
    :returns: new H2OFrame with the rounded values.
    """
    round_expr = ExprNode("round", self, digits)
    return H2OFrame._expr(expr=round_expr, cache=self._ex._cache)
def asnumeric(self):
    """
    Create a new frame with all columns converted to numeric.

    :returns: new H2OFrame whose columns are all of type "real".
    """
    converted = H2OFrame._expr(expr=ExprNode("as.numeric", self), cache=self._ex._cache)
    cache = converted._ex._cache
    if cache.types_valid():
        cache.types = dict.fromkeys(cache.types, "real")
    return converted
def ascharacter(self):
    """
    Convert all columns in the frame into strings.

    :returns: new H2OFrame whose columns are all of type "string".
    """
    converted = H2OFrame._expr(expr=ExprNode("as.character", self), cache=self._ex._cache)
    cache = converted._ex._cache
    if cache.types_valid():
        cache.types = dict.fromkeys(cache.types, "string")
    return converted
def na_omit(self):
    """
    Drop every row of the frame that contains at least one NA.

    :returns: new H2OFrame with the NA-containing rows removed.
    """
    filtered = H2OFrame._expr(expr=ExprNode("na.omit", self), cache=self._ex._cache)
    # The surviving row count is unknown until the expression is evaluated.
    filtered._ex._cache.nrows = -1
    return filtered
def difflag1(self):
    """
    Conduct a diff-1 transform on a numeric single-column frame.

    :returns: an H2OFrame in which each element equals the corresponding source
        element minus the element in the previous row.
    :raises H2OValueError: if the frame has several columns, or the column
        is not numeric.
    """
    if self.ncols > 1:
        raise H2OValueError("Only single-column frames supported")
    if self.types[self.columns[0]] not in ("real", "int", "bool"):
        raise H2OValueError("Numeric column expected")
    return H2OFrame._expr(expr=ExprNode("difflag1", self), cache=self._ex._cache)
def isna(self):
    """
    Indicate, element-wise, whether each cell of the frame holds an NA.

    :returns: an H2OFrame of 1s and 0s, where 1 marks an NA cell.
    """
    out = H2OFrame._expr(expr=ExprNode("is.na", self))
    out_cache = out._ex._cache
    src_cache = self._ex._cache
    out_cache.nrows = src_cache.nrows
    out_cache.ncols = src_cache.ncols
    if src_cache.names:
        indicator_names = ["isNA(%s)" % n for n in src_cache.names]
        out_cache.names = indicator_names
        out_cache.types = dict.fromkeys(indicator_names, "int")
    return out
def year(self):
    """
    Extract the "year" part from a date column.

    :returns: a single-column H2OFrame holding the year of each timestamp.
    """
    out = H2OFrame._expr(expr=ExprNode("year", self), cache=self._ex._cache)
    if out._ex._cache.types_valid():
        out._ex._cache.types = dict.fromkeys(self._ex._cache.types, "int")
    return out
def month(self):
    """
    Extract the "month" part from a date column.

    :returns: a single-column H2OFrame holding the month of each timestamp.
    """
    out = H2OFrame._expr(expr=ExprNode("month", self), cache=self._ex._cache)
    if out._ex._cache.types_valid():
        out._ex._cache.types = dict.fromkeys(self._ex._cache.types, "int")
    return out
def week(self):
    """
    Extract the "week" part from a date column.

    :returns: a single-column H2OFrame holding the week of each timestamp.
    """
    out = H2OFrame._expr(expr=ExprNode("week", self), cache=self._ex._cache)
    if out._ex._cache.types_valid():
        out._ex._cache.types = dict.fromkeys(self._ex._cache.types, "int")
    return out
def day(self):
    """
    Extract the "day" part from a date column.

    :returns: a single-column H2OFrame holding the day of each timestamp.
    """
    out = H2OFrame._expr(expr=ExprNode("day", self), cache=self._ex._cache)
    if out._ex._cache.types_valid():
        out._ex._cache.types = dict.fromkeys(self._ex._cache.types, "int")
    return out
def dayOfWeek(self):
    """
    Extract the "day-of-week" part from a date column.

    :returns: a single-column H2OFrame holding the day-of-week of each timestamp.
    """
    out = H2OFrame._expr(expr=ExprNode("dayOfWeek", self), cache=self._ex._cache)
    if out._ex._cache.types_valid():
        out._ex._cache.types = dict.fromkeys(self._ex._cache.types, "int")
    return out
def hour(self):
    """
    Extract the "hour-of-day" part from a date column.

    :returns: a single-column H2OFrame holding the hour-of-day of each timestamp.
    """
    out = H2OFrame._expr(expr=ExprNode("hour", self), cache=self._ex._cache)
    if out._ex._cache.types_valid():
        out._ex._cache.types = dict.fromkeys(self._ex._cache.types, "int")
    return out
def minute(self):
    """
    Extract the "minute" part from a date column.

    :returns: a single-column H2OFrame holding the minute of each timestamp.
    """
    out = H2OFrame._expr(expr=ExprNode("minute", self), cache=self._ex._cache)
    if out._ex._cache.types_valid():
        out._ex._cache.types = dict.fromkeys(self._ex._cache.types, "int")
    return out
def second(self):
    """
    Extract the "second" part from a date column.

    :returns: a single-column H2OFrame holding the second of each timestamp.
    """
    out = H2OFrame._expr(expr=ExprNode("second", self), cache=self._ex._cache)
    if out._ex._cache.types_valid():
        out._ex._cache.types = dict.fromkeys(self._ex._cache.types, "int")
    return out
def runif(self, seed=None):
    """
    Generate a column of uniform [0,1) random doubles matching this frame's row layout.

    :param int seed: seed for the random number generator (``None`` lets the
        backend choose one).
    :returns: single-column H2OFrame of doubles sampled uniformly from [0,1).
    """
    out = H2OFrame._expr(expr=ExprNode("h2o.runif", self, -1 if seed is None else seed))
    out._ex._cache.ncols = 1
    out._ex._cache.nrows = self.nrow
    return out
def stratified_split(self, test_frac=0.2, seed=-1):
    """
    Construct a column usable to perform a random stratified train/test split.

    :param float test_frac: the fraction of rows assigned to "test".
    :param int seed: seed for the random number generator.
    :returns: an H2OFrame with a single categorical column having the two
        levels ``"train"`` and ``"test"``.
    """
    split_expr = ExprNode('h2o.random_stratified_split', self, test_frac, seed)
    return H2OFrame._expr(expr=split_expr)
def match(self, table, nomatch=0):
    """
    Make a vector of the positions of (first) matches of this column in ``table``.

    Only applicable to single-column categorical/string frames.

    :param List table: the list of items to match against.
    :param int nomatch: value returned for cells that have no match.
    :returns: a new H2OFrame holding, for each cell, the position of its first
        match in ``table`` (or ``nomatch``).
    """
    match_expr = ExprNode("match", self, table, nomatch, None)
    return H2OFrame._expr(expr=match_expr)
def cut(self, breaks, labels=None, include_lowest=False, right=True, dig_lab=3):
    """
    Bin a numeric single-column frame into categorical "buckets".

    :param List[float] breaks: the cut points in the numeric vector.
    :param List[str] labels: labels for the produced levels; defaults to the
        set notation of the intervals defined by ``breaks``.
    :param bool include_lowest: make the intervals ``[lo, hi]`` instead of
        the default ``(lo, hi]``.
    :param bool right: include the high value: ``(lo, hi]``; if False, use
        ``(lo, hi)``.
    :param int dig_lab: number of digits after the decimal point in the
        default labels.
    :returns: single-column H2OFrame of categorical data.
    :raises H2OValueError: if the frame is not a single numeric column.
    """
    assert_is_type(breaks, [numeric])
    if self.ncols != 1:
        raise H2OValueError("Single-column frame is expected")
    if self.types[self.names[0]] not in {"int", "real"}:
        raise H2OValueError("A numeric column is expected")
    cut_expr = ExprNode("cut", self, breaks, labels, include_lowest, right, dig_lab)
    out = H2OFrame._expr(expr=cut_expr, cache=self._ex._cache)
    out._ex._cache.types = dict.fromkeys(self.names, "enum")
    return out
def which(self):
    """
    Row indices at which this (single, integer) column holds non-zero values.

    Equivalent to ``[index for index, value in enumerate(self) if value]``.

    :returns: a new single-column H2OFrame of the matching row indices.
    """
    which_expr = ExprNode("which", self)
    return H2OFrame._expr(expr=which_expr)
def idxmax(self,skipna=True, axis=0):
    """
    Index of the maximum value per column (axis=0) or per row (axis=1).

    :param bool skipna: if True (default) NAs are ignored during the search;
        otherwise the presence of an NA makes the result NA.
    :param int axis: 0 to search down each column (result: 1 row, same number
        of columns); 1 to search across each row (result: 1 column, same
        number of rows).
    :returns: an H2OFrame of max-value indices.
    """
    argmax_expr = ExprNode("which.max", self, skipna, axis)
    return H2OFrame._expr(expr=argmax_expr)
def idxmin(self,skipna=True, axis=0):
    """
    Index of the minimum value per column (axis=0) or per row (axis=1).

    :param bool skipna: if True (default) NAs are ignored during the search;
        otherwise the presence of an NA makes the result NA.
    :param int axis: 0 to search down each column (result: 1 row, same number
        of columns); 1 to search across each row (result: 1 column, same
        number of rows).
    :returns: an H2OFrame of min-value indices.
    """
    argmin_expr = ExprNode("which.min", self, skipna, axis)
    return H2OFrame._expr(expr=argmin_expr)
def ifelse(self, yes, no):
    """
    Vectorized conditional: take from ``yes`` where this frame is true, else from ``no``.

    Equivalent to ``[y if t else n for t, y, n in zip(self, yes, no)]``. All
    frames must have the same row count; single-column frames and scalars are
    broadened to match wider frames.

    :param yes: frame or scalar used where the test is true.
    :param no: frame or scalar used where the test is false.
    :returns: an H2OFrame of the merged yes/no values according to this test frame.
    """
    merged_expr = ExprNode("ifelse", self, yes, no)
    return H2OFrame._expr(expr=merged_expr)
def apply(self, fun=None, axis=0):
    """
    Apply a lambda expression to each column or row of the frame.

    :param fun: a lambda expression to be applied per column or per row.
    :param axis: 0 = apply to each column; 1 = apply to each row.
    :returns: a new H2OFrame with the results of applying ``fun``.
    """
    from .astfun import lambda_to_expr
    assert_is_type(axis, 0, 1)
    assert_is_type(fun, FunctionType)
    assert_satisfies(fun, fun.__name__ == "<lambda>")
    body = lambda_to_expr(fun)
    # The Rapids "apply" op uses margin 2 for columns and 1 for rows.
    margin = 2 if axis == 0 else 1
    return H2OFrame._expr(expr=ExprNode("apply", self, margin, *body))
#-------------------------------------------------------------------------------------------------------------------
# Synonyms + Deprecated
#-------------------------------------------------------------------------------------------------------------------
# Here we have all methods that are provided as alternative names to some other names defined above. This also
# includes methods that we rename as part of the deprecation process (but keeping the old name for the sake of
# backward compatibility). We gather them all down here to have a slightly cleaner code.
@staticmethod
def mktime(year=1970, month=0, day=0, hour=0, minute=0, second=0, msec=0):
    """
    Deprecated, use :func:`moment` instead.

    Kept for backward compatibility only. It is not very stable and,
    counterintuitively, uses 0-based months and days, so "January 4th, 2001"
    should be entered as ``mktime(2001, 0, 3)``.
    """
    time_expr = ExprNode("mktime", year, month, day, hour, minute, second, msec)
    return H2OFrame._expr(time_expr)
@property
def columns(self):
    """
    Displays the column names. Same as ``self.names``.

    :returns: Column names.

    :examples:

    >>> python_obj = [1,2,2.5,-100.9,0]
    >>> frame = h2o.H2OFrame(python_obj)
    >>> frame.columns
    """
    return self.names

@columns.setter
def columns(self, value):
    # Delegates to set_names() so renames go through its validation.
    self.set_names(value)
@property
def col_names(self):
    """
    Displays the column names. Same as ``self.names`` (alias kept for backward compatibility).

    :returns: Column names.

    :examples:

    >>> python_obj = [1,2,2.5,-100.9,0]
    >>> frame = h2o.H2OFrame(python_obj)
    >>> frame.col_names
    """
    return self.names

@col_names.setter
def col_names(self, value):
    # Delegates to set_names() so renames go through its validation.
    self.set_names(value)
def __len__(self):
    """Number of rows in the dataframe, same as ``self.nrows``."""
    # NOTE: as a consequence, a 0-row frame is falsy, like an empty list.
    return self.nrows
@property
def nrow(self):
    """
    Same as ``self.nrows`` (alias kept for backward compatibility).

    :returns: Number of rows in the dataframe.

    :examples:

    >>> iris = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader_NA_2.csv")
    >>> iris.nrow
    """
    return self.nrows
@property
def ncol(self):
    """
    Same as ``self.ncols`` (alias kept for backward compatibility).

    :returns: Number of columns in the dataframe.

    :examples:

    >>> iris = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader_NA_2.csv")
    >>> iris.ncol
    """
    return self.ncols
@property
def dim(self):
    """
    Gives the dimensions of the frame as ``[nrow, ncol]``. Same as ``list(self.shape)``.

    :returns: Frame dimensions.

    :examples:

    >>> iris = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv")
    >>> iris.dim
    """
    return [self.nrow, self.ncol]
#@property
#def frame_id(self):
# """Same as ``frame.id``."""
# return self.id
#@frame_id.setter
#def frame_id(self, value):
# self.id = value
@staticmethod
def from_python(python_obj, destination_frame=None, header=0, separator=",", column_names=None,
                column_types=None, na_strings=None):
    """[DEPRECATED] Use constructor ``H2OFrame()`` instead."""
    # Thin compatibility shim: forwards all arguments positionally to H2OFrame().
    return H2OFrame(python_obj, destination_frame, header, separator, column_names, column_types,
                    na_strings)
def ischaracter(self):
    """[DEPRECATED] Use ``frame.isstring()``."""
    # Kept as a backward-compatible alias.
    return self.isstring()
#-----------------------------------------------------------------------------------------------------------------------
# Helpers
#-----------------------------------------------------------------------------------------------------------------------
def _getValidCols(by_idx, fr): # so user can input names of the columns as well is idx num
tmp = []
for i in by_idx:
if type(i) == str:
if i not in fr.names:
raise H2OValueError("Column: " + i + " not in frame.")
tmp.append(fr.names.index(i))
elif type(i) != int:
raise H2OValueError("Join on column: " + i + " not of type int")
else:
tmp.append(i)
return list(set(tmp))
def _binop(lhs, op, rhs, rtype=None):
    """Build the Rapids expression for the elementwise binary operation ``lhs op rhs``.

    :param lhs: left operand -- string, number, date-like scalar, or H2OFrame.
    :param op: Rapids operator name (e.g. "+", "*", "==").
    :param rhs: right operand (same accepted types as ``lhs``).
    :param rtype: if given, this type is recorded for every column of the result.
    :returns: H2OFrame representing the (unevaluated) operation.
    :raises H2OValueError: when both operands are frames with incompatible shapes.
    """
    assert_is_type(lhs, str, numeric, datetime.date, pandas_timestamp, numpy_datetime, H2OFrame)
    assert_is_type(rhs, str, numeric, datetime.date, pandas_timestamp, numpy_datetime, H2OFrame)
    if isinstance(lhs, H2OFrame) and isinstance(rhs, H2OFrame) and lhs._is_frame and rhs._is_frame:
        lrows, lcols = lhs.shape
        rrows, rcols = rhs.shape
        # Frames are compatible when equal-shaped, or when one side can be
        # broadcast: a single column, a single row, or a 1x1 scalar frame.
        compatible = ((lcols == rcols and lrows == rrows) or
                      (lcols == 1 and lrows == rrows) or
                      (lcols == 1 and lrows == 1) or
                      (rcols == 1 and lrows == rrows) or
                      (rcols == 1 and rrows == 1) or
                      (lrows == 1 and lcols == rcols) or
                      (rrows == 1 and lcols == rcols)
                      )
        if not compatible:
            raise H2OValueError("Attempting to operate on incompatible frames: (%d x %d) and (%d x %d)"
                                % (lrows, lcols, rrows, rcols))
    # Date-like scalars are first lifted into single-cell time frames.
    if is_type(lhs, pandas_timestamp, numpy_datetime, datetime.date):
        lhs = H2OFrame.moment(date=lhs)
    if is_type(rhs, pandas_timestamp, numpy_datetime, datetime.date):
        rhs = H2OFrame.moment(date=rhs)
    # Reuse whichever operand's cache is available for the result's metadata.
    cache = lhs._ex._cache if isinstance(lhs, H2OFrame) else rhs._ex._cache
    res = H2OFrame._expr(expr=ExprNode(op, lhs, rhs), cache=cache)
    if rtype is not None and res._ex._cache._names is not None:
        res._ex._cache._types = {name: rtype for name in res._ex._cache._names}
    return res
def generatePandaEnumCols(pandaFtrain, cname, nrows, domainL):
    """
    For an H2O Enum column, we perform one-hot-encoding here and add one more column,
    "missing(NA)", to it.

    :param pandaFtrain: pandas frame derived from H2OFrame
    :param cname: column name of the enum column
    :param nrows: number of rows of the enum column
    :param domainL: list of the enum column's domain levels, in H2O's level order
    :return: pandas frame with the enum column encoded correctly for native XGBoost
    """
    import numpy as np
    import pandas as pd

    cmissingNames = [cname + ".missing(NA)"]
    # Bug fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int gives the same (platform default) integer dtype.
    tempnp = np.zeros((nrows, 1), dtype=int)
    # Mark rows whose value is NA / not among the declared domain levels.
    colVals = pandaFtrain[cname]
    for ind in range(nrows):
        try:
            if not (colVals[ind] in domainL):
                tempnp[ind] = 1
        except ValueError:
            pass
    zeroFrame = pd.DataFrame(tempnp)
    zeroFrame.columns = cmissingNames
    # One-hot encode, then select the dummy columns in H2O's domain order
    # so the output columns are "<cname>_<level>" for each level.
    temp = pd.get_dummies(pandaFtrain[cname], prefix=cname, drop_first=False)
    tempNames = list(temp)  # get column names
    colLength = len(tempNames)
    newNames = ['a'] * colLength
    for ind in range(0, colLength):
        newNames[ind] = cname + "_" + domainL[ind]
    ftemp = temp[newNames]
    ctemp = pd.concat([ftemp, zeroFrame], axis=1)
    return ctemp
| 41.135977
| 165
| 0.58305
|
794cb09015dcf5f4b5a89387040236137cebfabf
| 9,617
|
py
|
Python
|
lib/MCP342x.py
|
timbarnes/PyRoaster
|
b55fd84bdd6991345232a7b976a5743555807f62
|
[
"BSD-2-Clause"
] | null | null | null |
lib/MCP342x.py
|
timbarnes/PyRoaster
|
b55fd84bdd6991345232a7b976a5743555807f62
|
[
"BSD-2-Clause"
] | 5
|
2021-12-14T05:51:16.000Z
|
2021-12-14T05:58:18.000Z
|
lib/MCP342x.py
|
timbarnes/PyRoaster
|
b55fd84bdd6991345232a7b976a5743555807f62
|
[
"BSD-2-Clause"
] | null | null | null |
"""
MicroPython driver for the mcp342x ADC.
Adapted to PyCom boards from stevemarple/python-MCP342x
https://github.com/stevemarple/python-MCP342x
"""
import time
__author__ = 'Jose A. Jimenez-Berni'
__version__ = '0.0.1'
__license__ = 'MIT'
class MCP342x(object):
    """
    Class to represent MCP342x ADC.
    """

    # --- Configuration-register bit layout (one byte) ---
    _gain_mask = 0b00000011             # PGA gain selection (bits 0-1)
    _resolution_mask = 0b00001100       # resolution / sample-rate selection (bits 2-3)
    _continuous_mode_mask = 0b00010000  # 1 = continuous conversion, 0 = one-shot
    _channel_mask = 0b01100000          # input channel selection (bits 5-6)
    _not_ready_mask = 0b10000000        # /RDY bit; written 1 to start a one-shot conversion

    # Gain value -> 2-bit register encoding.
    _gain_to_config = {1: 0b00,
                       2: 0b01,
                       4: 0b10,
                       8: 0b11}

    # Resolution (bits) -> register encoding, already shifted into bits 2-3.
    _resolution_to_config = {12: 0b0000,
                             14: 0b0100,
                             16: 0b1000,
                             18: 0b1100}

    # Channel number -> register encoding, already shifted into bits 5-6.
    _channel_to_config = {0: 0b0000000,
                          1: 0b0100000,
                          2: 0b1000000,
                          3: 0b1100000}

    # Conversion time in seconds per resolution (inverse of the sample rate:
    # 240 / 60 / 15 / 3.75 samples per second -- TODO confirm against datasheet).
    _conversion_time = {12: 1.0/240,
                        14: 1.0/60,
                        16: 1.0/15,
                        18: 1.0/3.75}

    # Volts represented by one least-significant bit at each resolution.
    _resolution_to_lsb = {12: 1e-3,
                          14: 250e-6,
                          16: 62.5e-6,
                          18: 15.625e-6}
@staticmethod
def config_to_gain(config):
return [g for g, c in MCP342x._gain_to_config.items() if c == config & MCP342x._gain_mask][0]
@staticmethod
def config_to_resolution(config):
return [g for g, c in MCP342x._resolution_to_config.items() if c == config & MCP342x._resolution_mask][0]
@staticmethod
def config_to_lsb(config):
return MCP342x._resolution_to_lsb[MCP342x.config_to_resolution(config)]
@staticmethod
def config_to_str(config, width=8):
n = config & 0x7f
s = bin(n)[2:]
return '0b' + ('0' * (width-len(s))) + s
@staticmethod
def configure_device(i2c, address, config):
print('Configure device ' + hex(address))
self.cbuffer[0] = self.config
self.i2c.writeto(self.address, self.cbuffer)
def __init__(self,
i2c,
address,
device='MCP3424',
channel=0,
gain=1,
resolution=12,
continuous_mode=False,
scale_factor=1.0,
offset=0.0):
if device not in ('MCP3422', 'MCP3423', 'MCP3424',
'MCP3426', 'MCP3427', 'MCP3428'):
raise Exception('Unknown device: ' + str(device))
self.i2c = i2c
self.address = address
self.config = 0
self.device = device
self.scale_factor = scale_factor
self.offset = offset
self.cbuffer = bytearray(1)
self.cbuffer[0] = 0x00
self.set_channel(channel)
self.set_gain(gain)
self.set_resolution(resolution)
self.set_continuous_mode(continuous_mode)
def __repr__(self):
addr = hex(self.address)
return (type(self).__name__ + ': device=' + self.device
+ ', address=' + addr)
    # --- Simple accessors -------------------------------------------------

    def get_i2c(self):
        """Return the I2C bus object this ADC is attached to."""
        return self.i2c

    def get_address(self):
        """Return the device's I2C address."""
        return self.address

    def get_gain(self):
        """Return the gain encoded in the cached config byte."""
        return MCP342x.config_to_gain(self.config)

    def get_resolution(self):
        """Return the resolution (bits) encoded in the cached config byte."""
        return MCP342x.config_to_resolution(self.config)

    def get_continuous_mode(self):
        """Return True if the cached config selects continuous conversion mode."""
        return bool(self.config & MCP342x._continuous_mode_mask)

    def get_channel(self):
        """Return the input channel number encoded in the cached config byte."""
        return [g for g, c in MCP342x._channel_to_config.items() if c == self.config & MCP342x._channel_mask][0]

    def get_config(self):
        """Return the cached configuration byte (may differ from the device until written)."""
        return self.config

    def get_scale_factor(self):
        """Return the multiplicative scale factor applied by read()."""
        return self.scale_factor

    def get_offset(self):
        """Return the additive offset applied by read()."""
        return self.offset
    def set_address(self, address):
        """Change the I2C address used for subsequent transfers."""
        self.address = address
def set_gain(self, gain):
if gain not in MCP342x._gain_to_config:
raise Exception('Illegal gain')
self.config &= (~MCP342x._gain_mask & 0x7f)
self.config |= MCP342x._gain_to_config[gain]
def set_resolution(self, resolution):
if resolution not in MCP342x._resolution_to_config:
raise Exception('Illegal resolution')
elif resolution == 18 and \
self.device not in ('MCP3422', 'MCP3423', 'MCP3424'):
raise Exception('18 bit sampling not suuported by ' +
self.device)
self.config &= (~MCP342x._resolution_mask & 0x7f)
self.config |= MCP342x._resolution_to_config[resolution]
def set_continuous_mode(self, continuous_mode):
if continuous_mode:
self.config |= MCP342x._continuous_mode_mask
else:
self.config &= (~MCP342x._continuous_mode_mask & 0x7f)
def set_channel(self, channel):
if channel not in MCP342x._channel_to_config:
raise Exception('Illegal channel')
elif channel in (2, 3) and \
self.device not in ('MCP3424', 'MCP3428'):
raise Exception('Channel ' + str(channel) +
' not supported by ' + self.device)
self.config &= (~MCP342x._channel_mask & 0x7f)
self.config |= MCP342x._channel_to_config[channel]
    def set_scale_factor(self, scale_factor):
        """Set the multiplier applied to voltages returned by read()."""
        self.scale_factor = scale_factor

    def set_offset(self, offset):
        """Set the additive offset applied to voltages returned by read()."""
        self.offset = offset
    def set_config(self, config):
        """Replace the cached config byte wholesale (the /RDY bit is masked off)."""
        self.config = config & 0x7f
    def get_conversion_time(self):
        """Return the nominal conversion time (seconds) for the current resolution."""
        return MCP342x._conversion_time[self.get_resolution()]
def configure(self):
"""Configure the device.
Send the device configuration saved inside the MCP342x object to the target device."""
print('Configuring ' + hex(self.get_address())
+ ' ch: ' + str(self.get_channel())
+ ' res: ' + str(self.get_resolution())
+ ' gain: ' + str(self.get_gain()))
self.cbuffer[0] = self.config
self.i2c.writeto(self.address, self.cbuffer)
def convert(self):
"""Initiate one-shot conversion.
The current settings are used, with the exception of continuous mode."""
c = self.config
c &= (~MCP342x._continuous_mode_mask & 0x7f) # Force one-shot
c |= MCP342x._not_ready_mask # Convert
#print('Convert ' + hex(self.address) + ' config: ' + bin(c))
self.i2c.writeto(self.address, c)
def raw_read(self):
    """Poll the device until a fresh conversion is ready and return
    (signed_count, config_byte_reported_by_device).

    NOTE(review): indentation restored from a whitespace-mangled source.
    """
    res = self.get_resolution()
    # 18-bit results occupy three data bytes + config byte; lower
    # resolutions use two data bytes + config byte.
    bytes_to_read = 4 if res == 18 else 3
    while True:
        # Stupid smbus forces us to write a byte of data, even
        # with its 'I2C' write command. For MCP342x this forces us
        # to overwrite the configuration setting.
        #
        # The correct action would be to check the configuration
        # reported by raw_read() matches the stored configuration
        # in the object. This can't be done since we have to
        # destroy the actual value before reading.
        self.cbuffer[0] = self.config
        self.i2c.writeto(self.address, self.cbuffer)
        time.sleep_ms(25)
        d = self.i2c.readfrom(self.address, bytes_to_read)
        # Last byte echoed back is the configuration/status register.
        config_used = d[-1]
        if config_used & MCP342x._not_ready_mask == 0:
            # Conversion complete: assemble big-endian data bytes.
            count = 0
            for i in range(bytes_to_read - 1):
                count <<= 8
                count |= d[i]
            # Interpret the res-bit value as two's complement.
            sign_bit_mask = 1 << (res - 1)
            count_mask = sign_bit_mask - 1
            sign_bit = count & sign_bit_mask
            count &= count_mask
            if sign_bit:
                count = -(~count & count_mask) - 1
            return count, config_used
def read(self, scale_factor=None, offset=None, raw=False):
    """Return the most recent conversion result.

    scale_factor and offset default to the values stored on this object.
    If raw is true, the signed ADC count is returned unmodified;
    otherwise the count is converted to a voltage:
    count * lsb * scale_factor / gain + offset.
    """
    if scale_factor is None:
        scale_factor = self.scale_factor
    if offset is None:
        offset = self.offset
    count, config_used = self.raw_read()
    # Go through the motions of checking that the configuration
    # matches. Until raw_read() is able to read without
    # overwriting the configuration setting this is unlikely to be
    # very useful.
    if config_used != self.config:
        # Bug fix: the message previously opened '(' without closing it.
        raise Exception('Config does not match ('
                        + MCP342x.config_to_str(config_used) + ' != '
                        + MCP342x.config_to_str(self.config) + ')')
    if raw:
        return count
    lsb = MCP342x.config_to_lsb(config_used)
    # With the standard scale_factor=1 this returns the voltage
    # difference between IN+ and IN-. Other scale_factors can be
    # used to account for gain or attenuation, or to convert
    # voltage to some sensor input value.
    voltage = (count * lsb * scale_factor / MCP342x.config_to_gain(config_used)) + offset
    return voltage
def convert_and_read(self,
                     sleep=True,
                     samples=None,
                     aggregate=None,
                     **kwargs):
    """Trigger conversion(s) and return the reading(s).

    When samples is None a single value is returned; otherwise a list of
    that many readings is taken. If aggregate is given it is applied to
    the result before returning. Extra kwargs are forwarded to read().
    """
    single = samples is None
    readings = None if single else [0] * samples
    for idx in ([0] if single else range(samples)):
        self.convert()
        if sleep:
            # Wait out most of the conversion time; read() polls for the rest.
            time.sleep(0.95 * self.get_conversion_time())
        value = self.read(**kwargs)
        if single:
            readings = value
        else:
            readings[idx] = value
    if aggregate:
        readings = aggregate(readings)
    return readings
| 34.844203
| 113
| 0.559842
|
794cb10035bf1d487eab3f1419c63acb685d281f
| 3,072
|
py
|
Python
|
opencv-2.4.11/samples/python2/lk_track.py
|
durai-chellamuthu/node-opencv
|
a9c18c77b2fe0f62f2f8376854bdf33de71f5dc3
|
[
"MIT"
] | 55
|
2015-06-20T20:15:33.000Z
|
2022-02-10T02:45:14.000Z
|
opencv-2.4.11/samples/python2/lk_track.py
|
durai-chellamuthu/node-opencv
|
a9c18c77b2fe0f62f2f8376854bdf33de71f5dc3
|
[
"MIT"
] | 8
|
2015-06-20T18:46:52.000Z
|
2015-10-31T11:08:04.000Z
|
opencv-2.4.11/samples/python2/lk_track.py
|
durai-chellamuthu/node-opencv
|
a9c18c77b2fe0f62f2f8376854bdf33de71f5dc3
|
[
"MIT"
] | 73
|
2015-06-20T15:59:27.000Z
|
2020-03-15T22:43:36.000Z
|
#!/usr/bin/env python
'''
Lucas-Kanade tracker
====================
Lucas-Kanade sparse optical flow demo. Uses goodFeaturesToTrack
for track initialization and back-tracking for match verification
between frames.
Usage
-----
lk_track.py [<video_source>]
Keys
----
ESC - exit
'''
import numpy as np
import cv2
import video
from common import anorm2, draw_str
from time import clock
# Parameters for cv2.calcOpticalFlowPyrLK: 15x15 search window, 2 pyramid
# levels, stop after 10 iterations or once the update falls below 0.03.
lk_params = dict( winSize  = (15, 15),
                  maxLevel = 2,
                  criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

# Parameters for cv2.goodFeaturesToTrack (Shi-Tomasi corner detection).
feature_params = dict( maxCorners = 500,
                       qualityLevel = 0.3,
                       minDistance = 7,
                       blockSize = 7 )
class App:
    """Sparse Lucas-Kanade point tracker over a video stream."""

    def __init__(self, video_src):
        self.track_len = 10          # max points kept per track history
        self.detect_interval = 5     # re-detect features every N frames
        self.tracks = []             # list of point histories [(x, y), ...]
        self.cam = video.create_capture(video_src)
        self.frame_idx = 0

    def run(self):
        """Main loop: advance existing tracks, periodically seed new ones."""
        while True:
            ret, frame = self.cam.read()
            if not ret:
                # Bug fix: the ret flag was previously ignored, so a failed
                # grab (e.g. end of file / disconnected camera) crashed in
                # cvtColor with frame=None. Exit cleanly instead.
                break
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            vis = frame.copy()
            if len(self.tracks) > 0:
                img0, img1 = self.prev_gray, frame_gray
                p0 = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2)
                # Forward flow then backward flow; keep only points whose
                # round trip lands within 1 pixel of where they started.
                p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
                p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
                d = abs(p0-p0r).reshape(-1, 2).max(-1)
                good = d < 1
                new_tracks = []
                for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good):
                    if not good_flag:
                        continue
                    tr.append((x, y))
                    if len(tr) > self.track_len:
                        del tr[0]
                    new_tracks.append(tr)
                    cv2.circle(vis, (x, y), 2, (0, 255, 0), -1)
                self.tracks = new_tracks
                cv2.polylines(vis, [np.int32(tr) for tr in self.tracks], False, (0, 255, 0))
                draw_str(vis, (20, 20), 'track count: %d' % len(self.tracks))
            if self.frame_idx % self.detect_interval == 0:
                # Mask out neighbourhoods of existing points so new features
                # are only detected in currently untracked areas.
                mask = np.zeros_like(frame_gray)
                mask[:] = 255
                for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
                    cv2.circle(mask, (x, y), 5, 0, -1)
                p = cv2.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params)
                if p is not None:
                    for x, y in np.float32(p).reshape(-1, 2):
                        self.tracks.append([(x, y)])
            self.frame_idx += 1
            self.prev_gray = frame_gray
            cv2.imshow('lk_track', vis)
            ch = 0xFF & cv2.waitKey(1)
            if ch == 27:
                break
def main():
    """Entry point: read the optional video source argument and run the demo.

    NOTE: this file is Python 2 (print statement below).
    """
    import sys
    try: video_src = sys.argv[1]
    except: video_src = 0
    print __doc__
    App(video_src).run()
    cv2.destroyAllWindows()
# Run the demo only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| 30.72
| 92
| 0.513346
|
794cb163720060ae7527f73f3e54aae545884d88
| 2,933
|
py
|
Python
|
simulation/client.py
|
lilybhattacharjee5/visualize-pbft
|
f9e31ee43d38f58408b7c5ab0d0ce0f93d68400a
|
[
"ADSL"
] | 1
|
2021-12-08T18:05:27.000Z
|
2021-12-08T18:05:27.000Z
|
simulation/client.py
|
lilybhattacharjee5/visualize-pbft
|
f9e31ee43d38f58408b7c5ab0d0ce0f93d68400a
|
[
"ADSL"
] | null | null | null |
simulation/client.py
|
lilybhattacharjee5/visualize-pbft
|
f9e31ee43d38f58408b7c5ab0d0ce0f93d68400a
|
[
"ADSL"
] | null | null | null |
from simulation.message_generator import generate_transaction_msg
from simulation.utils import verify_mac
import json
## CLIENT FUNCTIONS
def send_transaction(queues, client_name, primary_name, curr_transaction, curr_view, p, client_primary_session_key, replica_names):
    """Send the client's transaction to the primary and a placeholder to
    every other replica queue (the client's own queue is skipped)."""
    for name, machine_queues in queues.items():
        if name == client_name:
            continue
        if name == primary_name:
            # Only the primary receives the real, MAC-protected request.
            request = generate_transaction_msg(client_name, primary_name, curr_transaction, curr_view, p, client_primary_session_key, replica_names)
            machine_queues["to_machine"].put((request, primary_name))
        else:
            print("Client sending transaction to replica", name)
            machine_queues["to_machine"].put((None, primary_name))
def replica_ack_primary(to_client, num_replicas, m_queue):
    """Consume one ack per replica, notify the main process, then block
    until the main process gives the go-ahead."""
    for _ in range(num_replicas):
        to_client["to_machine"].get()
    m_queue.put("replica inform done")
    to_client["from_main"].get()
def recv_inform(to_client, f, visible_log, client_session_keys):
    """Collect 'Inform' replies until f + 1 distinct replicas agree on the
    same result (PBFT reply quorum), or a 'New view' message signals a
    failed pre-prepare.

    Args:
        to_client: dict of queues; messages arrive on to_client["to_machine"].
        f: int. Max faulty replicas tolerated; the quorum is f + 1.
        visible_log: list. Human-readable event log, appended in place.
        client_session_keys: dict. Maps replica name -> shared MAC key.

    Returns:
        True once f + 1 matching informs are seen; None on a view change.

    NOTE(review): indentation reconstructed from a whitespace-mangled
    source; the two trailing statements appear unreachable — verify
    against the original repository.
    """
    # gather inform messages from f + 1 distinct senders
    sender_counts = {}  # result string -> number of distinct senders reporting it
    senders = {}        # replica name -> True once counted (deduplication)
    while True:
        received = False
        while not received:
            queue_elem = to_client["to_machine"].get()
            if len(queue_elem) == 1 and type(queue_elem[0]) == dict and queue_elem[0]["Type"] == "Inform":
                received = True
                curr_sender = queue_elem[0]["Sender"]
                # verify that the inform message is signed by the sender
                curr_inform_communication = queue_elem[0]["Communication"]
                curr_msg = curr_inform_communication["Message"]
                shared_key = client_session_keys[curr_sender]
                provided_digest = curr_inform_communication["Digest"]
                curr_inform_data = json.loads(curr_msg)
                curr_result = str(curr_inform_data["Result"])
                # Discard informs whose MAC does not verify.
                # NOTE(review): 'received' is already True at this point, so
                # this 'continue' still exits the inner loop — confirm intended.
                if not verify_mac(curr_msg, shared_key, provided_digest):
                    continue
                # Count each replica at most once.
                if curr_sender not in senders:
                    if curr_result not in sender_counts:
                        sender_counts[curr_result] = 1
                    else:
                        sender_counts[curr_result] += 1
                    senders[curr_sender] = True
                if curr_result in sender_counts and sender_counts[curr_result] >= f + 1:
                    print("sender counts", sender_counts)
                    return True
            # detected failure in pre-prepare stage -- resend transaction
            elif len(queue_elem) == 1 and type(queue_elem[0]) == dict and queue_elem[0]["Type"] == "New view":
                print("detected failure received by client")
                return None
            visible_log.append("client received {}".format(queue_elem))
    visible_log.append("client has received inform messages!")
    return True
| 47.306452
| 160
| 0.617457
|
794cb1ac127e080b1eb69b83fff9e64665e64618
| 142
|
py
|
Python
|
sortingAlgorithm/__init__.py
|
BennyJane/algorithm_mad
|
4173a4cc60d0f4f87b0cb7f6bc87d1eefbaff937
|
[
"Apache-2.0"
] | null | null | null |
sortingAlgorithm/__init__.py
|
BennyJane/algorithm_mad
|
4173a4cc60d0f4f87b0cb7f6bc87d1eefbaff937
|
[
"Apache-2.0"
] | null | null | null |
sortingAlgorithm/__init__.py
|
BennyJane/algorithm_mad
|
4173a4cc60d0f4f87b0cb7f6bc87d1eefbaff937
|
[
"Apache-2.0"
] | null | null | null |
# !/usr/bin/env python
# -*-coding:utf-8 -*-
# PROJECT : algorithm_mad
# Time :2020/12/22 10:15
# Warning :The Hard Way Is Easier
| 23.666667
| 36
| 0.605634
|
794cb27f7864d8c9c2cf5839f92c7429559720a9
| 317
|
py
|
Python
|
test.py
|
saifkhan-m/SentEval
|
7cd652409864a849dfbf44ce984c2bd8cececa1d
|
[
"BSD-3-Clause"
] | null | null | null |
test.py
|
saifkhan-m/SentEval
|
7cd652409864a849dfbf44ce984c2bd8cececa1d
|
[
"BSD-3-Clause"
] | null | null | null |
test.py
|
saifkhan-m/SentEval
|
7cd652409864a849dfbf44ce984c2bd8cececa1d
|
[
"BSD-3-Clause"
] | null | null | null |
# Quick sanity check that the MR sentiment-polarity dataset file is reachable
# from both the absolute Windows path and the repo-relative path.
import os
print(os.path.exists('C:/Users/khans/Documents/ber_hwr_hiwi/Paper/SentEval-master/SentEval-master/data/downstream/MR/rt-polarity.pos'))
print(os.path.exists('data/downstream/MR/rt-polarity.pos'))
#C:\Users\khans\Documents\ber_hwr_hiwi\Paper\SentEval-master\SentEval-master\data\downstream\MR\rt-polarity.pos
| 79.25
| 135
| 0.817035
|
794cb33fc229ab2c186ae474d4a92693dd0dfe54
| 2,843
|
py
|
Python
|
setup.py
|
alxlampe/d3rlpy
|
af7e6bd018a51f95138d121f59c50dc36ec87e3a
|
[
"MIT"
] | null | null | null |
setup.py
|
alxlampe/d3rlpy
|
af7e6bd018a51f95138d121f59c50dc36ec87e3a
|
[
"MIT"
] | null | null | null |
setup.py
|
alxlampe/d3rlpy
|
af7e6bd018a51f95138d121f59c50dc36ec87e3a
|
[
"MIT"
] | null | null | null |
import os
from setuptools import setup, Extension

# Force C++11 for the Cython-generated extension build.
os.environ['CFLAGS'] = '-std=c++11'

if __name__ == "__main__":
    from numpy import get_include
    from Cython.Build import cythonize
    # setup Cython build
    # NOTE(review): '-march=native' ties the built binary to the build
    # machine's CPU; confirm this is intended before distributing wheels.
    ext = Extension('d3rlpy.dataset',
                    sources=['d3rlpy/dataset.pyx'],
                    include_dirs=[get_include(), 'd3rlpy/cpp/include'],
                    language='c++',
                    extra_compile_args=["-std=c++11", "-O3", "-ffast-math", "-march=native"],
                    extra_link_args=["-std=c++11"])
    # linetrace/binding enable profiling & introspection of the Cython code.
    ext_modules = cythonize([ext],
                            compiler_directives={
                                'linetrace': True,
                                'binding': True
                            })
    # main setup
    setup(name="d3rlpy",
          version="0.40",
          description="Data-driven Deep Reinforcement Learning Library as an Out-of-the-box Tool",
          long_description=open("README.md").read(),
          long_description_content_type="text/markdown",
          url="https://github.com/takuseno/d3rlpy",
          author="Takuma Seno",
          author_email="takuma.seno@gmail.com",
          license="MIT License",
          classifiers=["Development Status :: 4 - Beta",
                       "Intended Audience :: Developers",
                       "Intended Audience :: Education",
                       "Intended Audience :: Science/Research",
                       "Topic :: Scientific/Engineering",
                       "Topic :: Scientific/Engineering :: Artificial Intelligence",
                       "Programming Language :: Python :: 3.6",
                       "Programming Language :: Python :: Implementation :: CPython",
                       "Operating System :: POSIX :: Linux",
                       'Operating System :: Microsoft :: Windows',
                       "Operating System :: MacOS :: MacOS X"],
          install_requires=["torch",
                            "scikit-learn",
                            "tensorboardX",
                            "tqdm",
                            "GPUtil",
                            "h5py",
                            "gym",
                            "kornia"],
          packages=["d3rlpy",
                    "d3rlpy.algos",
                    "d3rlpy.algos.torch",
                    "d3rlpy.augmentation",
                    "d3rlpy.dynamics",
                    "d3rlpy.dynamics.torch",
                    "d3rlpy.metrics",
                    "d3rlpy.models",
                    "d3rlpy.models.torch",
                    "d3rlpy.preprocessing",
                    "d3rlpy.online"],
          python_requires=">=3.5.0",
          zip_safe=False,
          # Ship Cython sources and C++ headers with the package.
          package_data={'d3rlpy': ['*.pyx', '*.pxd', '*.h']},
          ext_modules=ext_modules)
| 41.202899
| 98
| 0.460429
|
794cb374877fac19cc5bf5ee6103dd519130b7e6
| 8,363
|
py
|
Python
|
src/robot/result/resultbuilder.py
|
N-Aero/robotframework
|
e15664f80e830f30209f118117bf361c74ebe7fb
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2022-02-17T11:02:52.000Z
|
2022-02-17T11:02:52.000Z
|
src/robot/result/resultbuilder.py
|
N-Aero/robotframework
|
e15664f80e830f30209f118117bf361c74ebe7fb
|
[
"ECL-2.0",
"Apache-2.0"
] | 24
|
2021-01-26T07:07:27.000Z
|
2022-03-03T06:41:53.000Z
|
src/robot/result/resultbuilder.py
|
rticau/robotframework
|
33ee46dfacd5173c0a38d89c1a60abf6a747c8c0
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2018-11-30T18:49:45.000Z
|
2018-11-30T18:49:45.000Z
|
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.errors import DataError
from robot.model import SuiteVisitor
from robot.utils import ET, ETSource, get_error_message, unic
from .executionresult import Result, CombinedResult
from .flattenkeywordmatcher import (FlattenByNameMatcher, FlattenByTypeMatcher,
FlattenByTagMatcher)
from .merger import Merger
from .xmlelementhandlers import XmlElementHandler
def ExecutionResult(*sources, **options):
"""Factory method to constructs :class:`~.executionresult.Result` objects.
:param sources: XML source(s) containing execution results.
Can be specified as paths, opened file objects, or strings/bytes
containing XML directly. Support for bytes is new in RF 3.2.
:param options: Configuration options.
Using ``merge=True`` causes multiple results to be combined so that
tests in the latter results replace the ones in the original.
Setting ``rpa`` either to ``True`` (RPA mode) or ``False`` (test
automation) sets execution mode explicitly. By default it is got
from processed output files and conflicting modes cause an error.
Other options are passed directly to the
:class:`ExecutionResultBuilder` object used internally.
:returns: :class:`~.executionresult.Result` instance.
Should be imported by external code via the :mod:`robot.api` package.
See the :mod:`robot.result` package for a usage example.
"""
if not sources:
raise DataError('One or more data source needed.')
if options.pop('merge', False):
return _merge_results(sources[0], sources[1:], options)
if len(sources) > 1:
return _combine_results(sources, options)
return _single_result(sources[0], options)
def _merge_results(original, merged, options):
result = ExecutionResult(original, **options)
merger = Merger(result, rpa=result.rpa)
for path in merged:
merged = ExecutionResult(path, **options)
merger.merge(merged)
return result
def _combine_results(sources, options):
return CombinedResult(ExecutionResult(src, **options) for src in sources)
def _single_result(source, options):
ets = ETSource(source)
result = Result(source, rpa=options.pop('rpa', None))
try:
return ExecutionResultBuilder(ets, **options).build(result)
except IOError as err:
error = err.strerror
except:
error = get_error_message()
raise DataError("Reading XML source '%s' failed: %s" % (unic(ets), error))
class ExecutionResultBuilder(object):
"""Builds :class:`~.executionresult.Result` objects based on output files.
Instead of using this builder directly, it is recommended to use the
:func:`ExecutionResult` factory method.
"""
def __init__(self, source, include_keywords=True, flattened_keywords=None):
"""
:param source: Path to the XML output file to build
:class:`~.executionresult.Result` objects from.
:param include_keywords: Boolean controlling whether to include
keyword information in the result or not. Keywords are
not needed when generating only report. Although the the option name
has word "keyword", it controls also including FOR and IF structures.
:param flatten_keywords: List of patterns controlling what keywords to
flatten. See the documentation of ``--flattenkeywords`` option for
more details.
"""
self._source = source \
if isinstance(source, ETSource) else ETSource(source)
self._include_keywords = include_keywords
self._flattened_keywords = flattened_keywords
def build(self, result):
# Parsing is performance optimized. Do not change without profiling!
handler = XmlElementHandler(result)
with self._source as source:
self._parse(source, handler.start, handler.end)
result.handle_suite_teardown_failures()
if not self._include_keywords:
result.suite.visit(RemoveKeywords())
return result
def _parse(self, source, start, end):
context = ET.iterparse(source, events=('start', 'end'))
if not self._include_keywords:
context = self._omit_keywords(context)
elif self._flattened_keywords:
context = self._flatten_keywords(context, self._flattened_keywords)
for event, elem in context:
if event == 'start':
start(elem)
else:
end(elem)
elem.clear()
def _omit_keywords(self, context):
omitted_kws = 0
for event, elem in context:
# Teardowns aren't omitted yet to allow checking suite teardown status.
# They'll be removed later when not needed in `build()`.
omit = elem.tag in ('kw', 'for', 'if') and elem.get('type') != 'TEARDOWN'
start = event == 'start'
if omit and start:
omitted_kws += 1
if not omitted_kws:
yield event, elem
elif not start:
elem.clear()
if omit and not start:
omitted_kws -= 1
def _flatten_keywords(self, context, flattened):
# Performance optimized. Do not change without profiling!
name_match, by_name = self._get_matcher(FlattenByNameMatcher, flattened)
type_match, by_type = self._get_matcher(FlattenByTypeMatcher, flattened)
tags_match, by_tags = self._get_matcher(FlattenByTagMatcher, flattened)
started = -1 # if 0 or more, we are flattening
tags = []
containers = {'kw', 'for', 'iter', 'if'}
inside_kw = 0 # to make sure we don't read tags from a test
seen_doc = False
for event, elem in context:
tag = elem.tag
start = event == 'start'
end = not start
if start and tag in containers:
inside_kw += 1
if started >= 0:
started += 1
elif by_name and name_match(elem.get('name', ''), elem.get('library')):
started = 0
seen_doc = False
elif by_type and type_match(tag):
started = 0
seen_doc = False
elif started < 0 and by_tags and inside_kw:
if end and tag == 'tag':
tags.append(elem.text or '')
elif end and tags:
if tags_match(tags):
started = 0
seen_doc = False
tags = []
if end and tag in containers:
inside_kw -= 1
if started == 0 and not seen_doc:
doc = ET.Element('doc')
doc.text = '_*Keyword content flattened.*_'
yield 'start', doc
yield 'end', doc
if started == 0 and end and tag == 'doc':
seen_doc = True
elem.text = ('%s\n\n_*Keyword content flattened.*_'
% (elem.text or '')).strip()
if started <= 0 or tag == 'msg':
yield event, elem
else:
elem.clear()
if started >= 0 and end and tag in containers:
started -= 1
def _get_matcher(self, matcher_class, flattened):
matcher = matcher_class(flattened)
return matcher.match, bool(matcher)
class RemoveKeywords(SuiteVisitor):
def start_suite(self, suite):
suite.setup = None
suite.teardown = None
def visit_test(self, test):
test.body = []
| 40.597087
| 87
| 0.617482
|
794cb4326088c53e7b9066955bbcd0f6f69e5e11
| 353
|
py
|
Python
|
rcommander_pr2_gui/src/rcommander_pr2_gui/rcommander_pr2_server.py
|
gt-ros-pkg/rcommander-pr2
|
7c7408db30525b80a77dde34b4a8af68ec23d5a4
|
[
"BSD-3-Clause"
] | 3
|
2015-03-18T10:20:13.000Z
|
2021-03-01T02:46:20.000Z
|
rcommander_pr2_gui/src/rcommander_pr2_gui/rcommander_pr2_server.py
|
gt-ros-pkg/rcommander-pr2
|
7c7408db30525b80a77dde34b4a8af68ec23d5a4
|
[
"BSD-3-Clause"
] | null | null | null |
rcommander_pr2_gui/src/rcommander_pr2_gui/rcommander_pr2_server.py
|
gt-ros-pkg/rcommander-pr2
|
7c7408db30525b80a77dde34b4a8af68ec23d5a4
|
[
"BSD-3-Clause"
] | 2
|
2016-08-03T21:50:59.000Z
|
2021-03-01T02:46:20.000Z
|
#!/usr/bin/python
import roslib; roslib.load_manifest('rcommander_pr2_gui')
import pypr2.pr2_utils as pu
import rcommander_web.rcommander_auto_server as rcs
import sys
import tf
import rospy
# "home/haidai/Desktop/rcommander")
rospy.init_node('rcommander_pr2_server')
path = sys.argv[1]
tf = tf.TransformListener()
pr2 = pu.PR2(tf)
rcs.run(pr2, path)
| 23.533333
| 57
| 0.787535
|
794cb6e91f7daa38a9126974762b4541b398cb9b
| 1,520
|
py
|
Python
|
example/locfdr_compute.py
|
yangle293/FDRnet
|
8906936b192cd8905e7fd12e1fabed5ace962d6c
|
[
"MIT"
] | 2
|
2021-02-17T17:24:47.000Z
|
2022-01-06T14:17:30.000Z
|
example/locfdr_compute.py
|
yangle293/FDRnet
|
8906936b192cd8905e7fd12e1fabed5ace962d6c
|
[
"MIT"
] | null | null | null |
example/locfdr_compute.py
|
yangle293/FDRnet
|
8906936b192cd8905e7fd12e1fabed5ace962d6c
|
[
"MIT"
] | 4
|
2019-11-14T15:56:06.000Z
|
2021-03-15T23:54:18.000Z
|
from __future__ import print_function
import sys
sys.path.insert(0, "../locfdr-python")
from locfdr import locfdr
from collections import OrderedDict
from os import listdir
from os.path import isfile, join, basename, splitext
import math
import numpy as np
import re
from scipy.stats import norm as norm
def run(pfilename):
    """Compute local FDR values for a gene/p-value table.

    Reads a tab-separated file of (gene, p-value) rows, converts p-values
    to z-scores, runs locfdr on the finite z-scores, and writes
    <pfilename without extension>_lfdr.txt with (gene, lfdr) rows.

    Fixes over the original: files are closed via context managers, the
    unused `tmp` list is removed, loop variables no longer shadow the
    `gene`/`fdr` lists, and the O(n^2) `i in index_noninfs` membership
    test now uses the O(1) dict lookup (same keys, same behavior).
    """
    genes = []
    pvals = []
    with open(join(pfilename)) as fh:
        for line in fh:
            fields = line.strip().split('\t')
            genes.append(fields[0])
            pvals.append(float(fields[1]))
    # Convert p-values to z-scores; p=0/p=1 produce +/-inf here.
    zz = -norm.ppf(pvals)
    # locfdr cannot handle infinite z-scores, so fit on the finite subset.
    index_noninfs = [i for i in range(len(zz))
                     if zz[i] != -float('Inf') and zz[i] != float('Inf')]
    results = locfdr([zz[i] for i in index_noninfs],
                     saveplot=True, saveroot=pfilename, showplot=False)
    fdr_dict = OrderedDict(zip(index_noninfs, results['fdr']))
    fdr = [1 for _ in range(len(zz))]
    for i in range(len(zz)):
        if i in fdr_dict:
            fdr[i] = fdr_dict[i]
        elif zz[i] == float('Inf'):
            # z = +inf corresponds to p = 0: certainly significant.
            fdr[i] = 0
    for i in range(len(fdr)):
        if zz[i] < 0:
            fdr[i] = 1.0
        if (zz[i] > 0) and math.isnan(fdr[i]):
            fdr[i] = 0.0
    data = OrderedDict(zip(genes, fdr))
    output = splitext(pfilename)[0] + '_lfdr.txt'
    with open(output, 'w') as fout:
        for g, v in data.items():
            fout.write("{}\t{}\n".format(g, v))
if __name__ == "__main__":
run(sys.argv[1])
| 30.4
| 100
| 0.599342
|
794cb7227c3058fd8f38fa9a89e969b32b10ad5d
| 29,536
|
py
|
Python
|
core/domain/summary_services.py
|
jlau323/oppia
|
37438a2c9bf7e66892fb9a6a93a1fe4ca7a82691
|
[
"Apache-2.0"
] | 2
|
2021-04-08T01:06:08.000Z
|
2021-06-02T08:20:13.000Z
|
core/domain/summary_services.py
|
gitter-badger/oppia
|
7d8e659264582d7ce74bc6c139e597b82bca0e04
|
[
"Apache-2.0"
] | 1
|
2020-05-27T06:08:17.000Z
|
2020-05-27T06:08:17.000Z
|
core/domain/summary_services.py
|
gitter-badger/oppia
|
7d8e659264582d7ce74bc6c139e597b82bca0e04
|
[
"Apache-2.0"
] | 1
|
2020-11-05T12:26:10.000Z
|
2020-11-05T12:26:10.000Z
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands that can be used to operate on activity summaries."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from constants import constants
from core.domain import activity_services
from core.domain import collection_services
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import rights_domain
from core.domain import rights_manager
from core.domain import search_services
from core.domain import stats_services
from core.domain import user_services
import python_utils
import utils
# Category groupings for the library index page: each entry maps an i18n
# header id to the search categories displayed under that header.
_LIBRARY_INDEX_GROUPS = [{
    'header_i18n_id': 'I18N_LIBRARY_GROUPS_MATHEMATICS_&_STATISTICS',
    'search_categories': [
        'Mathematics', 'Algebra', 'Arithmetic', 'Calculus', 'Combinatorics',
        'Geometry', 'Graph Theory', 'Logic', 'Probability', 'Statistics',
        'Trigonometry',
    ],
}, {
    'header_i18n_id': 'I18N_LIBRARY_GROUPS_COMPUTING',
    'search_categories': ['Algorithms', 'Computing', 'Programming'],
}, {
    'header_i18n_id': 'I18N_LIBRARY_GROUPS_SCIENCE',
    'search_categories': [
        'Astronomy', 'Biology', 'Chemistry', 'Engineering', 'Environment',
        'Medicine', 'Physics',
    ],
}, {
    'header_i18n_id': 'I18N_LIBRARY_GROUPS_HUMANITIES',
    'search_categories': [
        'Architecture', 'Art', 'Music', 'Philosophy', 'Poetry'
    ],
}, {
    'header_i18n_id': 'I18N_LIBRARY_GROUPS_LANGUAGES',
    'search_categories': [
        'Languages', 'Reading', 'English', 'Latin', 'Spanish', 'Gaulish'
    ],
}, {
    'header_i18n_id': 'I18N_LIBRARY_GROUPS_SOCIAL_SCIENCE',
    'search_categories': [
        'Business', 'Economics', 'Geography', 'Government', 'History', 'Law'
    ],
}]
def get_human_readable_contributors_summary(contributors_summary):
    """Map contributor usernames to their commit counts.

    Args:
        contributors_summary: dict. Maps user ids to the number of commits
            made by that user.

    Returns:
        dict. Maps usernames to dicts of the form
        {'num_commits': int}, e.g. {'albert': {'num_commits': 10}}.
    """
    user_ids = list(contributors_summary.keys())
    usernames = user_services.get_human_readable_user_ids(user_ids)
    return {
        username: {'num_commits': contributors_summary[user_id]}
        for user_id, username in python_utils.ZIP(user_ids, usernames)
    }
def get_learner_collection_dict_by_id(
        collection_id, user, strict=True,
        allow_invalid_explorations=False, version=None):
    """Gets a dictionary representation of a collection given by the provided
    collection ID. This dict includes user-specific playthrough information.

    Args:
        collection_id: str. The id of the collection.
        user: UserActionsInfo. Object having user_id, role and actions for
            given user.
        strict: bool. Whether to fail noisily if no collection with the given
            id exists in the datastore.
        allow_invalid_explorations: bool. Whether to also return explorations
            that are invalid, such as deleted/private explorations.
        version: str or None. The version number of the collection to be
            retrieved. If it is None, the latest version will be retrieved.

    Returns:
        dict. A dictionary that contains extra information along with the dict
        returned by collection_domain.Collection.to_dict() which includes useful
        data for the collection learner view. The information includes progress
        in the collection, information about explorations referenced within the
        collection, and a slightly nicer data structure for frontend work.

    Raises:
        ValidationError. If the collection retrieved using the given
        ID references non-existent explorations.
    """
    collection = collection_services.get_collection_by_id(
        collection_id, strict=strict, version=version)
    exp_ids = collection.exploration_ids
    exp_summary_dicts = get_displayable_exp_summary_dicts_matching_ids(
        exp_ids, user=user)
    # Lookup table: exploration id -> its displayable summary dict.
    exp_summaries_dict_map = {
        exp_summary_dict['id']: exp_summary_dict
        for exp_summary_dict in exp_summary_dicts
    }
    # TODO(bhenning): Users should not be recommended explorations they have
    # completed outside the context of a collection (see #1461).
    next_exploration_id = None
    completed_exp_ids = None
    if user.user_id:
        completed_exp_ids = (
            collection_services.get_valid_completed_exploration_ids(
                user.user_id, collection))
        next_exploration_id = collection.get_next_exploration_id(
            completed_exp_ids)
    else:
        # If the user is not logged in or they have not completed any of
        # the explorations yet within the context of this collection,
        # recommend the initial exploration.
        next_exploration_id = collection.first_exploration_id
        completed_exp_ids = []
    collection_dict = collection.to_dict()
    collection_dict['nodes'] = [
        node.to_dict() for node in collection.nodes]
    collection_dict['playthrough_dict'] = {
        'next_exploration_id': next_exploration_id,
        'completed_exploration_ids': completed_exp_ids
    }
    collection_dict['version'] = collection.version
    collection_is_public = rights_manager.is_collection_public(collection_id)
    # Insert an 'exploration' dict into each collection node, where the
    # dict includes meta information about the exploration (ID and title).
    for collection_node in collection_dict['nodes']:
        exploration_id = collection_node['exploration_id']
        summary_dict = exp_summaries_dict_map.get(exploration_id)
        if not allow_invalid_explorations:
            # A missing summary means the exploration is deleted or private.
            if not summary_dict:
                raise utils.ValidationError(
                    'Expected collection to only reference valid '
                    'explorations, but found an exploration with ID: %s (was '
                    'the exploration deleted or is it a private exploration '
                    'that you do not have edit access to?)'
                    % exploration_id)
            if collection_is_public and rights_manager.is_exploration_private(
                    exploration_id):
                raise utils.ValidationError(
                    'Cannot reference a private exploration within a public '
                    'collection, exploration ID: %s' % exploration_id)
        if summary_dict:
            collection_node['exploration_summary'] = summary_dict
        else:
            collection_node['exploration_summary'] = None
    return collection_dict
def get_displayable_collection_summary_dicts_matching_ids(collection_ids):
    """Return displayable summary dicts for the given collections.

    Args:
        collection_ids: list(str). Ids of the collections to summarise.

    Returns:
        list(dict). One collection summary dict per id, in the same order
        as collection_ids.
    """
    matching_summaries = (
        collection_services.get_collection_summaries_matching_ids(
            collection_ids))
    return _get_displayable_collection_summary_dicts(matching_summaries)
def get_exp_metadata_dicts_matching_query(query_string, search_cursor, user):
    """Return metadata dicts for explorations matching a search query.

    Args:
        query_string: str. The search query to run.
        search_cursor: str or None. Where to resume the search; None means
            start from the beginning of the results list.
        user: UserActionsInfo. Object having user_id, role and actions for
            the requesting user (used to filter private explorations).

    Returns:
        2-tuple of (exploration_list, new_search_cursor):
            - exploration_list: list(dict). Metadata dicts for the matching
              explorations.
            - new_search_cursor: str. Cursor to continue the search from.
    """
    matching_ids, updated_cursor = (
        exp_services.get_exploration_ids_matching_query(
            query_string, cursor=search_cursor))
    metadata_dicts = get_exploration_metadata_dicts(matching_ids, user)
    return metadata_dicts, updated_cursor
def get_exploration_metadata_dicts(exploration_ids, user):
    """Return metadata dicts for the given explorations, filtered by access.

    Deleted explorations are skipped. Private explorations are included
    only when the user is logged in and may edit them.

    Args:
        exploration_ids: list(str). Exploration ids to look up.
        user: UserActionsInfo. Object having user_id, role and actions for
            the requesting user.

    Returns:
        list(dict). One metadata dict per visible exploration, with keys
        'id', 'title' and 'objective'.
    """
    summaries = (
        exp_fetchers.get_exploration_summaries_matching_ids(exploration_ids))
    rights_list = (
        rights_manager.get_multiple_exploration_rights_by_ids(exploration_ids))
    visible_summaries = []
    for summary, rights in python_utils.ZIP(summaries, rights_list):
        # A None entry means the exploration (or its rights) was not found.
        if summary is None or rights is None:
            continue
        if summary.status == rights_domain.ACTIVITY_STATUS_PRIVATE:
            # Private explorations are visible only to logged-in editors.
            if user.user_id is None:
                continue
            if not rights_manager.check_can_edit_activity(user, rights):
                continue
        visible_summaries.append(summary)
    return [summary.to_metadata_dict() for summary in visible_summaries]
def get_displayable_exp_summary_dicts_matching_ids(exploration_ids, user=None):
    """Gets a summary of explorations in human readable form from
    exploration ids.

    Missing or deleted explorations are skipped, and private explorations are
    only included when the given user has edit access to them. Please use
    this function when needing summary information to display on exploration
    summary tiles in the frontend.

    Args:
        exploration_ids: list(str). List of exploration ids.
        user: UserActionsInfo or None. Object having user_id, role and actions
            for given user.

    Returns:
        list(dict). A list of exploration summary dicts in human readable
        form. Example:

        [ {
            'category': u'A category',
            'community_owned': False,
            'id': 'eid2',
            'language_code': 'en',
            'num_views': 0,
            'objective': u'An objective',
            'status': 'public',
            'tags': [],
            'thumbnail_bg_color': '#a33f40',
            'thumbnail_icon_url': self.get_static_asset_url(
                '/images/subjects/Lightbulb.svg'),
            'title': u'Exploration 2 Albert title',
        }, ]
    """
    summaries = exp_fetchers.get_exploration_summaries_matching_ids(
        exploration_ids)
    rights_objects = rights_manager.get_multiple_exploration_rights_by_ids(
        exploration_ids)

    visible_summaries = []
    for summary, rights in python_utils.ZIP(summaries, rights_objects):
        if summary is None or rights is None:
            continue
        if summary.status == rights_domain.ACTIVITY_STATUS_PRIVATE:
            # Private explorations require a logged-in user with edit access.
            if user is None:
                continue
            if not rights_manager.check_can_edit_activity(user, rights):
                continue
        visible_summaries.append(summary)

    return get_displayable_exp_summary_dicts(visible_summaries)
def get_displayable_exp_summary_dicts(exploration_summaries):
    """Gets a summary of explorations in human readable form.

    Given a list of exploration summary domain objects, returns a list,
    with the same number of elements, of the corresponding human-readable
    exploration summary dicts.

    This assumes that all the exploration summary domain objects passed in are
    valid (i.e., none of them are None).

    Args:
        exploration_summaries: list(ExplorationSummary). List of exploration
            summary objects.

    Returns:
        list(dict). A list of exploration summary dicts in human readable
        form. Example:

        [ {
            'category': u'A category',
            'community_owned': False,
            'id': 'eid2',
            'language_code': 'en',
            'num_views': 0,
            'objective': u'An objective',
            'status': 'public',
            'tags': [],
            'thumbnail_bg_color': '#a33f40',
            'thumbnail_icon_url': self.get_static_asset_url(
                '/images/subjects/Lightbulb.svg'),
            'title': u'Exploration 2 Albert title',
        }, ]
    """
    # Fetch play-count statistics for every exploration in one batched call.
    exp_version_references = [
        exp_domain.ExpVersionReference(summary.id, summary.version)
        for summary in exploration_summaries]
    exp_stats_list = stats_services.get_exploration_stats_multi(
        exp_version_references)
    view_counts = [stats.num_starts for stats in exp_stats_list]

    displayable_exp_summaries = []
    for summary, num_views in python_utils.ZIP(
            exploration_summaries, view_counts):
        if not summary:
            continue
        displayable_exp_summaries.append({
            'id': summary.id,
            'title': summary.title,
            'activity_type': constants.ACTIVITY_TYPE_EXPLORATION,
            'category': summary.category,
            'created_on_msec': utils.get_time_in_millisecs(
                summary.exploration_model_created_on),
            'objective': summary.objective,
            'language_code': summary.language_code,
            'last_updated_msec': utils.get_time_in_millisecs(
                summary.exploration_model_last_updated),
            'human_readable_contributors_summary': (
                get_human_readable_contributors_summary(
                    summary.contributors_summary)),
            'status': summary.status,
            'ratings': summary.ratings,
            'community_owned': summary.community_owned,
            'tags': summary.tags,
            'thumbnail_icon_url': utils.get_thumbnail_icon_url_for_category(
                summary.category),
            'thumbnail_bg_color': utils.get_hex_color_for_category(
                summary.category),
            'num_views': num_views,
        })
    return displayable_exp_summaries
def _get_displayable_collection_summary_dicts(collection_summaries):
    """Gets a summary of collections in human readable form.

    Private collections (and None entries) are skipped.

    Args:
        collection_summaries: list(CollectionSummary). List of collection
            summary domain object.

    Returns:
        list(dict). A list of collection summary dicts in human readable
        form. Example:

        [ {
            'category': u'A category',
            'community_owned': False,
            'id': 'eid2',
            'language_code': 'en',
            'num_views': 0,
            'objective': u'An objective',
            'status': 'public',
            'tags': [],
            'thumbnail_bg_color': '#a33f40',
            'thumbnail_icon_url': self.get_static_asset_url(
                '/images/subjects/Lightbulb.svg'),
            'title': u'Exploration 2 Albert title',
        }, ]
    """
    displayable_summaries = []
    for summary in collection_summaries:
        if not summary:
            continue
        # Private collections are never shown on public listing pages.
        if summary.status == rights_domain.ACTIVITY_STATUS_PRIVATE:
            continue
        displayable_summaries.append({
            'id': summary.id,
            'title': summary.title,
            'category': summary.category,
            'activity_type': constants.ACTIVITY_TYPE_COLLECTION,
            'objective': summary.objective,
            'language_code': summary.language_code,
            'tags': summary.tags,
            'node_count': summary.node_count,
            'last_updated_msec': utils.get_time_in_millisecs(
                summary.collection_model_last_updated),
            'thumbnail_icon_url': utils.get_thumbnail_icon_url_for_category(
                summary.category),
            'thumbnail_bg_color': utils.get_hex_color_for_category(
                summary.category)})
    return displayable_summaries
def get_library_groups(language_codes):
    """Returns a list of groups for the library index page. Each group has a
    header and a list of dicts representing activity summaries.

    Args:
        language_codes: list(str). A list of language codes. Only explorations
            with these languages will be returned.

    Returns:
        list(dict). A list of groups for the library index page. Each group is
        represented by a dict with the following keys and values:
            - activity_summary_dicts: list(dict). A list of dicts representing
                activity summaries.
            - categories: list(str). The list of group categories.
            - header_i18n_id: str. The i18n id for the header of the category.
            - has_full_results_page: bool. Whether the group header links to
                a "full results" page. This is always True for the
                "exploration category" groups.
            - full_results_url: str. The URL to the corresponding "full
                results" page.
    """
    language_codes_suffix = ''
    if language_codes:
        language_codes_suffix = ' language_code=("%s")' % (
            '" OR "'.join(language_codes))

    def _generate_query(categories):
        """Generates query based on the categories and language codes.

        Args:
            categories: list(str). List of categories.

        Returns:
            str. Generated query.
        """
        # This assumes that 'categories' is non-empty.
        return 'category=("%s")%s' % (
            '" OR "'.join(categories), language_codes_suffix)

    # Collect all collection ids so that the summary details can be retrieved
    # with a single get_multi() call.
    all_collection_ids = []
    header_id_to_collection_ids = {}
    for group in _LIBRARY_INDEX_GROUPS:
        collection_ids = search_services.search_collections(
            _generate_query(group['search_categories']), 8)[0]
        header_id_to_collection_ids[group['header_i18n_id']] = collection_ids
        all_collection_ids += collection_ids

    collection_summaries = [
        summary for summary in
        collection_services.get_collection_summaries_matching_ids(
            all_collection_ids)
        if summary is not None]
    collection_summary_dicts = {
        summary_dict['id']: summary_dict
        for summary_dict in _get_displayable_collection_summary_dicts(
            collection_summaries)
    }

    # Collect all exp ids so that the summary details can be retrieved with a
    # single get_multi() call.
    all_exp_ids = []
    header_to_exp_ids = {}
    for group in _LIBRARY_INDEX_GROUPS:
        exp_ids = search_services.search_explorations(
            _generate_query(group['search_categories']), 8)[0]
        header_to_exp_ids[group['header_i18n_id']] = exp_ids
        all_exp_ids += exp_ids

    exp_summaries = [
        summary for summary in
        exp_fetchers.get_exploration_summaries_matching_ids(all_exp_ids)
        if summary is not None]
    exp_summary_dicts = {
        summary_dict['id']: summary_dict
        for summary_dict in get_displayable_exp_summary_dicts(exp_summaries)
    }

    results = []
    for group in _LIBRARY_INDEX_GROUPS:
        # Collections are listed before explorations within each group.
        # (A dead 'summary_dicts = []' assignment that was immediately
        # overwritten has been removed here.)
        collection_ids_to_display = (
            header_id_to_collection_ids[group['header_i18n_id']])
        summary_dicts = [
            collection_summary_dicts[collection_id]
            for collection_id in collection_ids_to_display
            if collection_id in collection_summary_dicts]

        exp_ids_to_display = header_to_exp_ids[group['header_i18n_id']]
        summary_dicts += [
            exp_summary_dicts[exp_id] for exp_id in exp_ids_to_display
            if exp_id in exp_summary_dicts]

        # Groups with no publicly-viewable activities are omitted entirely.
        if not summary_dicts:
            continue

        results.append({
            'header_i18n_id': group['header_i18n_id'],
            'categories': group['search_categories'],
            'activity_summary_dicts': summary_dicts,
            'has_full_results_page': True,
            'full_results_url': None,
        })

    return results
def require_activities_to_be_public(activity_references):
    """Raises an exception if any activity reference in the list does not
    exist, or is not public.

    Args:
        activity_references: list(ActivityReference). A list of
            ActivityReference domain objects.

    Raises:
        Exception. Any activity reference in the list does not
            exist, or is not public.
    """
    exploration_ids, collection_ids = activity_services.split_by_type(
        activity_references)

    activity_summaries_by_type = [{
        'type': constants.ACTIVITY_TYPE_EXPLORATION,
        'ids': exploration_ids,
        'summaries': exp_fetchers.get_exploration_summaries_matching_ids(
            exploration_ids),
    }, {
        'type': constants.ACTIVITY_TYPE_COLLECTION,
        'ids': collection_ids,
        'summaries': collection_services.get_collection_summaries_matching_ids(
            collection_ids),
    }]

    for activities_info in activity_summaries_by_type:
        activity_type = activities_info['type']
        # Walk ids and fetched summaries in lockstep; a None summary means
        # the activity does not exist (or was deleted).
        for activity_id, summary in python_utils.ZIP(
                activities_info['ids'], activities_info['summaries']):
            if summary is None:
                raise Exception(
                    'Cannot feature non-existent %s with id %s' %
                    (activity_type, activity_id))
            if summary.status == rights_domain.ACTIVITY_STATUS_PRIVATE:
                raise Exception(
                    'Cannot feature private %s with id %s' %
                    (activity_type, activity_id))
def get_featured_activity_summary_dicts(language_codes):
    """Returns a list of featured activities with the given language codes.
    The return value is sorted according to the list stored in the datastore.

    Args:
        language_codes: list(str). A list of language codes. Only explorations
            with these languages will be returned.

    Returns:
        list(dict). Each dict in this list represents a featured activity.
        For example:

        [ {
            'status': 'public',
            'thumbnail_bg_color': '#a33f40',
            'community_owned': False,
            'tags': [],
            'thumbnail_icon_url': self.get_static_asset_url(
                '/images/subjects/Lightbulb.svg'),
            'language_code': constants.DEFAULT_LANGUAGE_CODE,
            'id': 'eid2',
            'category': 'A category',
            'ratings': feconf.get_empty_ratings(),
            'title': 'A title',
            'num_views': 0,
            'objective': 'An objective',
        }, ]
    """
    activity_references = activity_services.get_featured_activity_references()
    exploration_ids, collection_ids = activity_services.split_by_type(
        activity_references)

    exp_summary_dicts = get_displayable_exp_summary_dicts_matching_ids(
        exploration_ids)
    col_summary_dicts = get_displayable_collection_summary_dicts_matching_ids(
        collection_ids)

    # Index the summary dicts by activity type and id so the featured
    # ordering from the datastore can be preserved below.
    summary_dicts_by_id = {
        constants.ACTIVITY_TYPE_EXPLORATION: {
            summary_dict['id']: summary_dict
            for summary_dict in exp_summary_dicts
        },
        constants.ACTIVITY_TYPE_COLLECTION: {
            summary_dict['id']: summary_dict
            for summary_dict in col_summary_dicts
        },
    }

    featured_summary_dicts = []
    for reference in activity_references:
        summary_dict = summary_dicts_by_id[reference.type].get(reference.id)
        if summary_dict is None:
            continue
        if summary_dict['language_code'] in language_codes:
            featured_summary_dicts.append(summary_dict)
    return featured_summary_dicts
def get_top_rated_exploration_summary_dicts(language_codes, limit):
    """Returns a list of top rated explorations with the given language codes.
    The return value is sorted in decreasing order of average rating.

    Args:
        language_codes: list(str). A list of language codes. Only explorations
            with these languages will be returned.
        limit: int. The maximum number of explorations to return.

    Returns:
        list(dict). Each dict in this list represents a exploration summary in
        human readable form. The list is sorted in decreasing order of average
        rating. For example:

        [ {
            'category': u'A category',
            'community_owned': False,
            'id': 'eid2',
            'language_code': 'en',
            'num_views': 0,
            'objective': u'An objective',
            'status': 'public',
            'tags': [],
            'thumbnail_bg_color': '#a33f40',
            'thumbnail_icon_url': self.get_static_asset_url(
                '/images/subjects/Lightbulb.svg'),
            'title': u'Exploration 2 Albert title',
        }, ]
    """
    candidate_summaries = exp_services.get_top_rated_exploration_summaries(
        limit).values()
    # Keep only explorations that are in a requested language and have at
    # least one rating.
    rated_summaries = [
        summary for summary in candidate_summaries
        if summary.language_code in language_codes
        and sum(summary.ratings.values()) > 0]
    rated_summaries.sort(
        key=lambda summary: summary.scaled_average_rating, reverse=True)
    return get_displayable_exp_summary_dicts(rated_summaries)
def get_recently_published_exp_summary_dicts(limit):
    """Returns a list of recently published explorations.

    Args:
        limit: int. The maximum number of explorations to return.

    Returns:
        list(dict). Each dict in this list represents a featured activity in
        human readable form. For example:

        [ {
            'category': u'A category',
            'community_owned': False,
            'id': 'eid2',
            'language_code': 'en',
            'num_views': 0,
            'objective': u'An objective',
            'status': 'public',
            'tags': [],
            'thumbnail_bg_color': '#a33f40',
            'thumbnail_icon_url': self.get_static_asset_url(
                '/images/subjects/Lightbulb.svg'),
            'title': u'Exploration 2 Albert title',
        }, ]
    """
    # Sort so the most recently published explorations come first. The
    # redundant list-comprehension copy of .values() that used to precede
    # this call has been removed; sorted() consumes the view directly.
    summaries = sorted(
        exp_services.get_recently_published_exp_summaries(limit).values(),
        key=lambda exp_summary: exp_summary.first_published_msec,
        reverse=True)
    return get_displayable_exp_summary_dicts(summaries)
| 39.276596
| 80
| 0.659568
|
794cb7cb8c1a6625fcc5cba4edfefd2ef9b817d4
| 4,361
|
py
|
Python
|
src/primaires/scripting/fonctions/est_de_type.py
|
vlegoff/tsunami
|
36b3b974f6eefbf15cd5d5f099fc14630e66570b
|
[
"BSD-3-Clause"
] | 14
|
2015-08-21T19:15:21.000Z
|
2017-11-26T13:59:17.000Z
|
src/primaires/scripting/fonctions/est_de_type.py
|
vincent-lg/tsunami
|
36b3b974f6eefbf15cd5d5f099fc14630e66570b
|
[
"BSD-3-Clause"
] | 20
|
2015-09-29T20:50:45.000Z
|
2018-06-21T12:58:30.000Z
|
src/primaires/scripting/fonctions/est_de_type.py
|
vlegoff/tsunami
|
36b3b974f6eefbf15cd5d5f099fc14630e66570b
|
[
"BSD-3-Clause"
] | 3
|
2015-05-02T19:42:03.000Z
|
2018-09-06T10:55:00.000Z
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant la fonction est_de_type."""
from primaires.scripting.fonction import Fonction
from primaires.scripting.instruction import ErreurExecution
class ClasseFonction(Fonction):

    """Retourne vrai si l'objet ou prototype est de type indiqué."""

    # NOTE(review): the docstrings below are deliberately left in French --
    # presumably the scripting system displays them verbatim as in-game help
    # text for builders, so they are runtime data, not mere documentation.
    # Confirm before translating.

    @classmethod
    def init_types(cls):
        # Register the accepted signatures: the first argument may be an
        # object instance, an object prototype, or a prototype key (string);
        # the second argument is always the type name (string).
        cls.ajouter_types(cls.est_de_type_objet, "Objet", "str")
        cls.ajouter_types(cls.est_de_type_objet, "PrototypeObjet", "str")
        cls.ajouter_types(cls.est_de_type_cle, "str", "str")

    @staticmethod
    def est_de_type_objet(objet, nom_type):
        """Retourne vrai si l'objet est du type indiqué.

        Retourne vrai également si le nom de type est un parent du
        type de l'objet. Par exemple, si l'objet est un fruit
        mais que l'on test si c'est une nourriture. Vous pouvez également
        préciser plusieurs tests séparés par une barre verticale (|). La
        fonction retournera vrai si l'objet est au moins d'un des types indiqués.

        ATTENTION : le test est sensible à l'accentuation.

        Paramètres à entrer :

          * objet : l'objet à tester
          * nom_type : le nom du type

        Exemples d'utilisation :

          si est_de_type(objet, "viande"):
              # Si l'objet est de type viande
          finsi
          si est_de_type(objet, "viande|chaussure"):
              # Si l'objet est de type viande ou chaussure
          finsi

        """
        # The help text documents '|' as the separator, while the code splits
        # on "_b_" -- presumably the scripting parser encodes '|' as "_b_"
        # before this point; confirm against the expression parser before
        # changing either side.
        return any(objet.est_de_type(n) for n in nom_type.split("_b_"))

    @staticmethod
    def est_de_type_cle(cle, nom_type):
        """Retourne vrai si le prototype d'objet est du type indiqué.

        Retourne vrai également si le nom de type est un parent du
        type du prototype. Par exemple, si le prototype est un fruit
        mais que l'on test si c'est une nourriture. Vous pouvez également
        préciser plusieurs tests séparés par une barre verticale (|). La
        fonction retournera vrai si l'objet est au moins d'un des types indiqués.

        ATTENTION : le test est sensible à l'accentuation.

        Paramètres à entrer :

          * cle : la clé du prototype d'objet (une chaîne)
          * nom_type : le nom du type

        Exemples d'utilisation :

          si est_de_type("viande_lapin", "viande"):
              # Si l'objet est de type viande
          finsi
          si est_de_type("viande_lapin", "viande|chaussure"):
              # Si l'objet est de type viande ou chaussure
          finsi

        """
        # Prototype keys are looked up lower-case; unknown keys abort the
        # script with a builder-visible error.
        cle = cle.lower()
        if not cle in importeur.objet.prototypes:
            raise ErreurExecution("prototype {} introuvable".format(
                repr(cle)))

        prototype = importeur.objet.prototypes[cle]
        # Same multi-type semantics as est_de_type_objet above.
        return any(prototype.est_de_type(n) for n in nom_type.split("_b_"))
| 40.009174
| 81
| 0.690667
|
794cb7fe67f869299cf5ec2f7df3110a6e4ca21d
| 7,393
|
py
|
Python
|
Garteur Model/Reference Model/modal_optim_GARTEUR_COBYLA.py
|
aguptaisae/Masters-Research-Project-S2
|
b44cd21a95a60b8fb38852dac5a5b0794e1f3e3f
|
[
"Apache-2.0"
] | 4
|
2019-02-15T14:43:15.000Z
|
2021-04-13T04:22:48.000Z
|
Garteur Model/Reference Model/modal_optim_GARTEUR_COBYLA.py
|
aguptaisae/Masters-Research-Project-S2
|
b44cd21a95a60b8fb38852dac5a5b0794e1f3e3f
|
[
"Apache-2.0"
] | null | null | null |
Garteur Model/Reference Model/modal_optim_GARTEUR_COBYLA.py
|
aguptaisae/Masters-Research-Project-S2
|
b44cd21a95a60b8fb38852dac5a5b0794e1f3e3f
|
[
"Apache-2.0"
] | 3
|
2020-11-27T02:18:16.000Z
|
2021-08-04T13:47:46.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 29 10:50:10 2016
@author: © Joan Mas Colomer
"""
from __future__ import print_function
from openmdao.api import Problem, Group, IndepVarComp, ExecComp, ScipyGMRES, SqliteRecorder, ScipyOptimizer, view_model
from aerostructures import NastranDynamic, ModalFunctions, DynamicStructureProblemDimensions, DynamicStructureProblemParams
#from mayavi import mlab
import numpy as np
if __name__ == "__main__":

    # --- Modal optimization settings -------------------------------------
    # Enable mode-tracking
    mode_tracking = True
    # Model with no rigid modes
    free_free = True
    # Number of normal modes to consider for the comparison
    N = 5
    # Number of normal modes to extract
    M = 10
    # Normal mode extraction method
    eigr = 'FEER'
    # Frequency lower bound (Hz)
    F1 = 0.01

    # Problem parameters (kg, mm, MPa)
    E = 72000.
    nu = 0.33
    rho_s = 2.7E-6
    omega_ratio = 1.
    mass_ratio = 1.
    length_ratio = 1.

    # Baseline thicknesses (6 panels) and lumped masses (3 points)
    t_0 = np.array([50.,
                    11.,
                    10.,
                    10.,
                    10.,
                    10.])
    m_0 = np.array([0.5,
                    0.2,
                    0.2])

    # Design variable boundaries: half to twice the baseline values
    t_max = 2.*t_0
    t_min = 0.5*t_0
    m_max = 2.*m_0
    m_min = 0.5*m_0

    # Problem dimensions read from the Nastran model
    dynamic_problem_dimensions = DynamicStructureProblemDimensions()

    node_id_all = dynamic_problem_dimensions.node_id_all
    ns_all = dynamic_problem_dimensions.ns_all
    tn = dynamic_problem_dimensions.tn
    mn = dynamic_problem_dimensions.mn
    sn = dynamic_problem_dimensions.sn

    # Reference problem parameters from the Nastran model
    dynamic_problem_params = DynamicStructureProblemParams(
        node_id_all, N, free_free)

    # Problem parameter values (length-scaled coordinates, reference modes)
    node_coord_all = length_ratio*dynamic_problem_params.node_coord_all
    phi_ref = dynamic_problem_params.phi_ref
    eigval_ref = dynamic_problem_params.eigval_ref
    mass_ref = dynamic_problem_params.mass_ref
    omega_norm_ref = omega_ratio*np.linalg.norm(np.sqrt(eigval_ref))

    top = Problem()
    top.root = root = Group()

    # Independent (input) variables of the MDAO problem
    root.add('s_coord_all', IndepVarComp('node_coord_all', node_coord_all), promotes=['*'])
    root.add('Youngs_modulus', IndepVarComp('E', E), promotes=['*'])
    root.add('Poissons_ratio', IndepVarComp('nu', nu), promotes=['*'])
    root.add('material_density', IndepVarComp('rho_s', rho_s), promotes=['*'])
    root.add('thicknesses', IndepVarComp('t', np.ones(tn)), promotes=['*'])
    root.add('masses', IndepVarComp('m', np.ones(mn)), promotes=['*'])
    root.add('reference_mass', IndepVarComp('mass_ref', mass_ref), promotes=['*'])
    root.add('reference_eigvec', IndepVarComp('phi_ref', phi_ref), promotes=['*'])
    root.add('reference_eigval', IndepVarComp('eigval_ref', eigval_ref), promotes=['*'])
    root.add('frequency_ratio', IndepVarComp('omega_ratio', omega_ratio), promotes=['*'])
    root.add('model_mass_scaling', IndepVarComp('mass_ratio', mass_ratio), promotes=['*'])
    root.add('modes_number', IndepVarComp('N', float(N)), promotes=['*'])

    # Analysis components: Nastran modal solve, modal comparison functions,
    # and the MAC-trace objective (f = 0 when the first N modes match).
    root.add('modal', NastranDynamic(node_id_all, tn, mn, sn, M, eigr, F1, free_free), promotes=['*'])
    root.add('mod_func', ModalFunctions(node_id_all, N, M, mode_tracking), promotes=['*'])
    root.add('obj_func', ExecComp('f=(N-MAC_trace)/N'), promotes=['*'])

    # Mass equality expressed as a pair of inequality constraints
    root.add('con_mass_upper', ExecComp('con_m_u = delta_mass'), promotes=['*'])
    root.add('con_mass_lower', ExecComp('con_m_l = delta_mass'), promotes=['*'])

    # Per-mode frequency equality, also expressed as inequality pairs
    for i in range(N):
        root.add('con_freq_upper_'+str(i+1), ExecComp('delta_omega_u_'+str(i+1)+' = delta_omega['+str(i)+']', delta_omega=np.zeros(N,dtype=float)), promotes=['*'])

    for i in range(N):
        root.add('con_freq_lower_'+str(i+1), ExecComp('delta_omega_l_'+str(i+1)+' = delta_omega['+str(i)+']', delta_omega=np.zeros(N,dtype=float)), promotes=['*'])

    # Design variable bounds as constraints - Nastran doesn't accept negative
    # thicknesses or sections (COBYLA does not enforce variable bounds).
    for i in range(tn):
        root.add('t_lower_bound_'+str(i+1), ExecComp('t_l_'+str(i+1)+' = t['+str(i)+']', t=np.zeros(tn,dtype=float)), promotes=['*'])

    for i in range(mn):
        root.add('m_lower_bound_'+str(i+1), ExecComp('m_l_'+str(i+1)+' = m['+str(i)+']', m=np.zeros(mn,dtype=float)), promotes=['*'])

    # Linear solver
    root.ln_solver = ScipyGMRES()

    # Gradient-free optimizer (Scipy COBYLA)
    top.driver = ScipyOptimizer()
    top.driver.options['optimizer'] = 'COBYLA'
    top.driver.options['disp'] = True
    top.driver.options['tol'] = 1.e-3
    top.driver.options['maxiter'] = 500
    top.driver.opt_settings['rhobeg'] = 0.01

    # Design variables are scaled to [0, 1]
    top.driver.add_desvar('t', lower=t_min, upper=t_max, adder=-t_min, scaler=1/(t_max-t_min))
    top.driver.add_desvar('m', lower=m_min, upper=m_max, adder=-m_min, scaler=1/(m_max-m_min))
    top.driver.add_objective('f')

    scaled_mass = mass_ratio*mass_ref
    top.driver.add_constraint('con_m_u', upper=0., scaler=1/scaled_mass)
    top.driver.add_constraint('con_m_l', lower=0., scaler=1/scaled_mass)

    for i in range(N):
        top.driver.add_constraint('delta_omega_u_'+str(i+1), upper=0., scaler=1/np.sqrt(eigval_ref[i]))

    for i in range(N):
        top.driver.add_constraint('delta_omega_l_'+str(i+1), lower=0., scaler=1/np.sqrt(eigval_ref[i]))

    for i in range(tn):
        top.driver.add_constraint('t_l_'+str(i+1), lower=0., scaler=1/t_0[i])

    for i in range(mn):
        top.driver.add_constraint('m_l_'+str(i+1), lower=0., scaler=1/m_0[i])

    # Optimization recorder
    recorder = SqliteRecorder('modal_optim_COBYLA_GARTEUR')
    recorder.options['record_params'] = True
    recorder.options['record_metadata'] = True
    top.driver.add_recorder(recorder)

    top.setup()

    view_model(top, show_browser=False)

    # Setting initial values for design variables
    top['t'] = t_0
    top['m'] = m_0

    top.run()

    top.cleanup()  # this closes all recorders

    # --- Visualization ----------------------------------------------------
    # BUG FIX: 'mlab' was used here while its import ('from mayavi import
    # mlab') was commented out at the top of the file, so this section always
    # raised NameError. Import lazily and skip plotting when mayavi is not
    # installed, so the optimization results are preserved either way.
    try:
        from mayavi import mlab
    except ImportError:
        mlab = None
        print('mayavi is not available; skipping mode-shape plots.')

    if mlab is not None:
        # Points coordinates
        xs = node_coord_all

        # Eigenvectors (reference vs. optimized)
        ds_ref = phi_ref
        ds = root.mod_func.unknowns['ord_phi']

        # Maximum span of the model (sets the deformation display scale)
        y_max = xs[:,1].max() - xs[:,1].min()

        # Plots: reference mode shapes in blue, optimized in red
        for i in range(N):
            defs_ref = xs + 0.1*y_max*np.hstack((np.split(ds_ref[:,i:i+1], 3, 0)))
            defs = xs + 0.1*y_max*np.hstack((np.split(ds[:,i:i+1], 3, 0)))
            mlab.figure(bgcolor = (1,1,1), fgcolor = (0,0,0))
            mlab.points3d(defs_ref[:,0], defs_ref[:,1], defs_ref[:,2], color=(0,0,1), scale_factor=0.1)
            mlab.points3d(defs[:,0], defs[:,1], defs[:,2], color=(1,0,0), scale_factor=0.1)
| 36.781095
| 164
| 0.626674
|
794cb81f77bdf41f3c48613d84f98414766ddab8
| 4,041
|
py
|
Python
|
hplip-3.20.3/ui/choosedevicedlg.py
|
Deril-Pana/wikiBlackcoinNL
|
9633307f0b485c27feae5da242944adf450e8963
|
[
"MIT"
] | null | null | null |
hplip-3.20.3/ui/choosedevicedlg.py
|
Deril-Pana/wikiBlackcoinNL
|
9633307f0b485c27feae5da242944adf450e8963
|
[
"MIT"
] | 1
|
2021-11-20T16:33:39.000Z
|
2021-11-20T16:33:39.000Z
|
hplip-3.20.3/ui/choosedevicedlg.py
|
Deril-Pana/wikiBlackcoinNL
|
9633307f0b485c27feae5da242944adf450e8963
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# (c) Copyright 2001-2015 HP Development Company, L.P.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Author: Don Welch
#
from base.g import *
from base.sixext import to_unicode
import sys
from qt import *
class ChooseDeviceDlg(QDialog):
    # Modal dialog showing a radio-button list of available devices. After
    # the dialog is accepted, the chosen device URI is available in
    # self.device_uri.
    def __init__(self, devices, parent = None,name = None,modal = 0,fl = 0):
        # devices: sequence whose items are indexable; only item[0] (the
        # device URI string) is read here.
        QDialog.__init__(self,parent,name,modal,fl)

        if not name:
            self.setName("ChooseDeviceDlg")

        # URI of the currently selected device; updated on radio clicks.
        self.device_uri = ''

        ChooseDeviceDlg_Layout = QGridLayout(self,1,1,6,6,"ChooseDeviceDlg_Layout")

        self.OKButton = QPushButton(self,"OKButton")
        ChooseDeviceDlg_Layout.addWidget(self.OKButton,2,2)

        self.CancelButton = QPushButton(self,"CancelButton")
        ChooseDeviceDlg_Layout.addWidget(self.CancelButton,2,1)

        # Spacers push the buttons to the bottom-right of the dialog.
        spacer1 = QSpacerItem(391,20,QSizePolicy.Expanding,QSizePolicy.Minimum)
        ChooseDeviceDlg_Layout.addItem(spacer1,2,0)

        spacer2 = QSpacerItem(20,290,QSizePolicy.Minimum,QSizePolicy.Expanding)
        ChooseDeviceDlg_Layout.addItem(spacer2,1,0)

        self.DevicesButtonGroup = QButtonGroup(self,"DevicesButtonGroup")
        self.DevicesButtonGroup.setColumnLayout(0,Qt.Vertical)
        self.DevicesButtonGroup.layout().setSpacing(6)
        self.DevicesButtonGroup.layout().setMargin(6)
        DevicesButtonGroupLayout = QGridLayout(self.DevicesButtonGroup.layout())
        DevicesButtonGroupLayout.setAlignment(Qt.AlignTop)

        # One radio button per device, pre-selecting the device the user
        # chose last time (if it is still present).
        self.radio_buttons = {}
        last_used_device_uri = user_conf.get('last_used', 'device_uri')
        last_used_index = None
        for y in range(len(devices)):
            self.radio_buttons[y] = QRadioButton(self.DevicesButtonGroup,"radioButton%d" % y)
            self.radio_buttons[y].setText(devices[y][0])
            if devices[y][0] == last_used_device_uri:
                last_used_index = y
                self.device_uri = devices[y][0]
            DevicesButtonGroupLayout.addWidget(self.radio_buttons[y], y, 0)

        if last_used_index is not None:
            self.radio_buttons[last_used_index].setChecked(1)
        else:
            # No remembered device: default to the first entry.
            # NOTE(review): assumes 'devices' is non-empty -- an empty list
            # raises IndexError here; confirm callers guarantee this.
            self.radio_buttons[0].setChecked(1)
            self.device_uri = devices[0][0]

        ChooseDeviceDlg_Layout.addMultiCellWidget(self.DevicesButtonGroup,0,0,0,2)

        self.languageChange()

        self.resize(QSize(592,112).expandedTo(self.minimumSizeHint()))
        self.clearWState(Qt.WState_Polished)

        # Wire buttons to the standard accept/reject slots, and radio clicks
        # to the selection-tracking handler below.
        self.connect(self.OKButton,SIGNAL("clicked()"),self,SLOT("accept()"))
        self.connect(self.CancelButton,SIGNAL("clicked()"),self,SLOT("reject()"))
        self.connect(self.DevicesButtonGroup,SIGNAL("clicked(int)"),self.DevicesButtonGroup_clicked)

    def languageChange(self):
        # Set all user-visible strings (kept together for translation).
        self.setCaption(self.__tr("Choose Device"))
        self.OKButton.setText(self.__tr("OK"))
        self.CancelButton.setText(self.__tr("Cancel"))
        self.DevicesButtonGroup.setTitle(self.__tr("Available Devices:"))

    def __tr(self,s,c = None):
        # Translation helper delegating to the Qt application translator.
        return qApp.translate("ChooseDeviceDlg",s,c)

    def DevicesButtonGroup_clicked(self,a0):
        # Slot: remember the URI of the radio button with index a0.
        self.device_uri = to_unicode(self.radio_buttons[a0].text())
if __name__ == "__main__":
    # Manual test driver for the dialog.
    a = QApplication(sys.argv)
    QObject.connect(a,SIGNAL("lastWindowClosed()"),a,SLOT("quit()"))
    # BUG FIX: ChooseDeviceDlg requires a 'devices' argument (its __init__
    # reads devices[y][0]); calling it with no arguments raised TypeError.
    # Supply a sample device list so the dialog can actually be exercised.
    sample_devices = [("hp:/usb/SAMPLE_DEVICE?serial=000000000000",)]
    w = ChooseDeviceDlg(sample_devices)
    a.setMainWidget(w)
    w.show()
    a.exec_loop()
| 37.073394
| 100
| 0.691908
|
794cb9225553a0568af4f9ef067e7f68e492b9ba
| 5,857
|
py
|
Python
|
diffco/kernel.py
|
rhgkrsus1/diffco
|
d5e393abee110b84ac94df449986dd0ed3f011a2
|
[
"MIT"
] | 11
|
2021-02-17T09:07:36.000Z
|
2022-03-13T10:11:19.000Z
|
diffco/kernel.py
|
rhgkrsus1/diffco
|
d5e393abee110b84ac94df449986dd0ed3f011a2
|
[
"MIT"
] | null | null | null |
diffco/kernel.py
|
rhgkrsus1/diffco
|
d5e393abee110b84ac94df449986dd0ed3f011a2
|
[
"MIT"
] | 3
|
2021-03-24T14:48:25.000Z
|
2022-02-16T08:53:19.000Z
|
import numpy as np
import torch
class KernelFunc:
    """Abstract base for kernel functions; subclasses must implement
    ``__call__(xs, x_primes)``."""

    def __init__(self):
        pass

    def __call__(self):
        # Calling the base class directly is always an error.
        raise NotImplementedError('You need to define your own __call__ function.')
class RQKernel(KernelFunc):
    """Rational-quadratic kernel:
    k(x, x') = 1 / (1 + gamma/p * ||x - x'||^2) ** p.
    """

    def __init__(self, gamma, p=2):
        self.gamma = gamma
        self.p = p

    def __call__(self, xs, x_primes):
        if xs.ndim == 1:
            xs = xs[np.newaxis, :]
        # Broadcast [len(xs), 1, channel] against [1, len(x_primes), channel].
        diff = x_primes[np.newaxis, :] - xs[:, np.newaxis]
        sq_dist = torch.sum(diff ** 2, dim=2)
        kvalues = (1 + self.gamma / self.p * sq_dist) ** (-self.p)
        if kvalues.shape[0] == 1:
            # Single query point: drop the leading axis (in place).
            kvalues = kvalues.squeeze_(0)
        return kvalues
class CauchyKernel(KernelFunc):
    """Cauchy kernel: k(x, x') = c / (||x - x'||^2 + c)."""

    def __init__(self, c):
        # c: positive scale constant of the kernel.
        self.c = c

    def __call__(self, xs, x_primes):
        if xs.ndim == 1:
            xs = xs[np.newaxis, :]
        xs = xs[:, np.newaxis]  # change to [len(xs), 1, channel]
        pair_diff = x_primes[np.newaxis, :] - xs
        # Use torch.sum, consistent with RQKernel: the inputs must already
        # be torch tensors (the in-place squeeze_ below is torch-only), and
        # the previous np.sum call only worked on tensors through fragile
        # duck-typing.
        kvalues = self.c / (torch.sum(pair_diff**2, dim=2) + self.c)
        if kvalues.shape[0] == 1:
            kvalues = kvalues.squeeze_(0)
        return kvalues
class MultiQuadratic(KernelFunc):
    """Multiquadric kernel: k(x, x') = sqrt(||x - x'||^2 / eps^2 + 1)."""

    def __init__(self, epsilon):
        self.epsilon = epsilon

    def __call__(self, xs, x_primes):
        if xs.ndim == 1:
            xs = xs[np.newaxis, :]
        # Pairwise differences: [len(xs), len(x_primes), channel].
        diff = x_primes[np.newaxis, :] - xs[:, np.newaxis]
        scaled_sq = torch.sum(diff ** 2, axis=2) / self.epsilon ** 2
        kvalues = torch.sqrt(scaled_sq + 1)
        if kvalues.shape[0] == 1:
            kvalues = kvalues.squeeze(0)
        return kvalues
class Polyharmonic(KernelFunc):
    """Polyharmonic spline kernel phi_k(r) / epsilon over pairwise distances.

    For even k, phi(r) = r^k * log(r) (with the r = 0 singularity defined
    as 0); for odd k, phi(r) = r^k.
    """

    def __init__(self, k, epsilon):
        self.epsilon = epsilon
        if k % 2 == 0:
            def _even_func(r):
                # 0**k * log(0) produces NaN; pin those entries to 0.
                vals = (r**k * torch.log(r))
                vals[torch.isnan(vals)] = 0
                return vals
            self._func = _even_func
        else:
            def _odd_func(r):
                return r**k
            self._func = _odd_func

    def __call__(self, xs, x_primes):
        if xs.ndim == 1:
            xs = xs[np.newaxis, :]
        dists = torch.cdist(xs, x_primes)
        kvalues = self._func(dists) / self.epsilon
        # NOTE(review): squeezes when there is a single *reference* point
        # (dim 1), unlike the kernels above which squeeze dim 0 — confirm
        # this asymmetry is intended.
        if kvalues.shape[1] == 1:
            kvalues = kvalues.squeeze(1)
        return kvalues
# def mq_r(self, r):
# kvalues = torch.sqrt(r**2/self.epsilon**2 + 1)
# return kvalues
# class mq(KernelFunc):
# def __init__(self, epsilon):
# self.epsilon = epsilon
# def __call__(self, xs, x_primes):
# if xs.ndim == 1:
# xs = xs[np.newaxis, :]
# xs = xs[np.newaxis, :] # change to [1, len(x), channel]
# pair_diff = x_primes[:, np.newaxis] - xs # [len(x_primes), len(xs), channel]
# kvalues = torch.sqrt(torch.sum(pair_diff**2, axis=2)
# if kvalues.shape[1] == 1:
# kvalues = kvalues.squeeze(1)
# return kvalues
class WeightedKernel(KernelFunc):
    """Rational-quadratic kernel over per-dimension weighted differences."""

    def __init__(self, gamma, w, p=2):
        self.gamma = gamma
        self.p = p
        # Shape [1, 1, d] so the weights broadcast over every pair.
        self.w = np.array(w).reshape((1, 1, -1))

    def __call__(self, xs, x_primes):
        if xs.ndim == 1:
            xs = xs[np.newaxis, :]
        # Pairwise differences: [n, m, d]
        deltas = x_primes[np.newaxis, :] - xs[:, np.newaxis]
        weighted_sq = np.sum((deltas * self.w)**2, axis=2)
        kvalues = 1 / (1 + self.gamma / self.p * weighted_sq)**self.p
        # NOTE(review): squeezes dim 1 (single reference point), unlike
        # RQKernel which squeezes dim 0 — confirm intended.
        if kvalues.shape[1] == 1:
            kvalues = kvalues.squeeze(1)
        return kvalues
class TangentKernel(KernelFunc):
    """Sigmoid/tangent kernel: k(x, x') = tanh(a * <x, x'> + c)."""

    def __init__(self, a, c):
        self.a = a  # slope of the inner product
        self.c = c  # additive offset

    def __call__(self, xs, x_primes):
        if xs.ndim == 1:
            xs = xs[np.newaxis, :]
        # Elementwise products per pair: [n, m, d]
        prods = x_primes[np.newaxis, :] * xs[:, np.newaxis]
        kvalues = np.tanh(self.a * np.sum(prods, 2) + self.c)
        if kvalues.shape[1] == 1:
            kvalues = kvalues.squeeze(1)
        return kvalues
class FKKernel(KernelFunc):
    """Kernel evaluated in workspace: configurations are mapped through the
    forward-kinematics callable *fkine*, flattened per configuration, and the
    wrapped *rq_kernel* is applied to the resulting control points."""

    def __init__(self, fkine, rq_kernel):
        self.fkine = fkine
        self.rq_kernel = rq_kernel

    def __call__(self, xs, x_primes):
        if xs.ndim == 1:
            xs = xs[np.newaxis, :]
        def to_controls(q):
            # One flattened row of control-point coordinates per configuration.
            return self.fkine(q).reshape(len(q), -1)
        return self.rq_kernel(to_controls(xs), to_controls(x_primes))
class LineKernel(KernelFunc):
    """Kernel between line segments, each encoded as the concatenation of its
    two endpoint configurations; averages *point_kernel* over the endpoints."""

    def __init__(self, point_kernel):
        self.point_kernel = point_kernel

    def __call__(self, xs, x_primes):
        if xs.ndim == 1:
            xs = xs[np.newaxis, :]
        if x_primes.ndim == 1:
            x_primes = x_primes[np.newaxis, :]
        width = xs.shape[1]
        assert width == x_primes.shape[1]
        # Each row holds two concatenated dof-dimensional configurations.
        assert width % 2 == 0
        dof = width // 2
        start_vals = self.point_kernel(xs[:, :dof], x_primes[:, :dof])
        end_vals = self.point_kernel(xs[:, dof:], x_primes[:, dof:])
        return (start_vals + end_vals) / 2
class LineFKKernel(KernelFunc):
    """Line-segment kernel evaluated in workspace via forward kinematics:
    both endpoints of each line are pushed through *fkine*, flattened per
    line, and compared with *rq_kernel*."""

    def __init__(self, fkine, rq_kernel):
        self.fkine = fkine
        self.rq_kernel = rq_kernel

    def __call__(self, xs, x_primes):
        if xs.ndim == 1:
            xs = xs[np.newaxis, :]
        if x_primes.ndim == 1:
            x_primes = x_primes[np.newaxis, :]
        width = xs.shape[1]
        assert width == x_primes.shape[1]
        # Each row is two concatenated dof-dimensional configurations.
        assert width % 2 == 0
        dof = width // 2
        def to_controls(lines):
            # Split rows into individual configurations, run FK, then
            # re-flatten so each line contributes one feature row.
            return self.fkine(lines.reshape(-1, dof)).reshape(len(lines), -1)
        return self.rq_kernel(to_controls(xs), to_controls(x_primes))
| 33.855491
| 92
| 0.556428
|
794cba4204a3d086698d49316c5e2ae372f8a10f
| 5,606
|
py
|
Python
|
Cogs/Emoji.py
|
MrAngelDo6pa/MedBotS
|
89e19d831507e20d0898114502967b2ad8ecf957
|
[
"MIT"
] | 2
|
2021-09-28T10:40:10.000Z
|
2021-11-07T14:49:07.000Z
|
Cogs/Emoji.py
|
ddoskid/lol12
|
35c097bbebeca3043a939b902b07474473344a3c
|
[
"MIT"
] | null | null | null |
Cogs/Emoji.py
|
ddoskid/lol12
|
35c097bbebeca3043a939b902b07474473344a3c
|
[
"MIT"
] | null | null | null |
import discord, os
from discord.ext import commands
from Cogs import GetImage, Utils
def setup(bot):
    # Standard discord.py extension entry point: register the Emoji cog.
    bot.add_cog(Emoji(bot))
class Emoji(commands.Cog):
    # Cog providing custom-emoji management: bulk-adding emojis from emoji
    # mentions, URLs, or attachments, and rendering a single emoji full-size.

    def __init__(self, bot):
        self.bot = bot
        # Hard cap on how many emojis one addemoji invocation may create.
        self.max_emojis = 10
        global Utils, DisplayName
        Utils = self.bot.get_cog("Utils")
        DisplayName = self.bot.get_cog("DisplayName")

    def _get_emoji_url(self, emoji):
        # Resolve an emoji string to a (url, name) tuple, or None when it is
        # neither a unicode emoji nor a <(a):name:id> custom-emoji mention.
        if len(emoji) < 3:
            # Emoji is likely a built-in like :)
            # Twemoji hosts unicode-emoji images keyed by the hyphen-joined
            # lowercase hex codepoints of the emoji.
            h = "-".join([hex(ord(x)).lower()[2:] for x in emoji])
            return ("https://github.com/twitter/twemoji/raw/master/assets/72x72/{}.png".format(h),h)
        # Must be a custom emoji
        emojiparts = emoji.replace("<","").replace(">","").split(":") if emoji else []
        if not len(emojiparts) == 3: return None
        # Build a custom emoji object; a non-empty first segment ("a") marks
        # the emoji as animated.
        emoji_obj = discord.PartialEmoji(animated=len(emojiparts[0]) > 0, name=emojiparts[1], id=emojiparts[2])
        # Return the url
        return (emoji_obj.url,emoji_obj.name)

    def _get_emoji_mention(self, emoji):
        # Rebuild the chat mention string <a:name:id> / <:name:id>.
        return "<{}:{}:{}>".format("a" if emoji.animated else "",emoji.name,emoji.id)

    @commands.command()
    async def addemoji(self, ctx, *, emoji = None, name = None):
        '''Adds the passed emoji, url, or attachment as a custom emoji with the passed name (bot-admin only, max of 10).'''
        # NOTE(review): with keyword-only `*, emoji`, the command parser packs
        # the whole remaining message into `emoji`, so `name` appears never to
        # be filled from chat — names are instead parsed out of `emoji` below.
        if not await Utils.is_bot_admin_reply(ctx): return
        if not len(ctx.message.attachments) and emoji == name == None:
            return await ctx.send("Usage: `{}addemoji [emoji, url, attachment] [name]`".format(ctx.prefix))
        # Let's find out if we have an attachment, emoji, or a url
        # Check attachments first - as they'll have priority
        if len(ctx.message.attachments):
            # The typed text (if any) becomes the name for the attachment(s).
            name = emoji
            emoji = " ".join([x.url for x in ctx.message.attachments])
            if name: # Add the name separated by a space
                emoji += " "+name
        # Now we split the emoji string, and walk it, looking for urls, emojis, and names
        emojis_to_add = []
        last_name = []
        for x in emoji.split():
            # Check for a url
            urls = Utils.get_urls(x)
            if len(urls):
                # Default the name to the url's base filename (sans extension).
                url = (urls[0], os.path.basename(urls[0]).split(".")[0])
            else:
                # Check for an emoji
                url = self._get_emoji_url(x)
                if not url:
                    # Gotta be a part of the name - add it
                    last_name.append(x)
                    continue
            if len(emojis_to_add) and last_name:
                # Update the previous name if need be
                # (names keep only alphanumerics and underscores)
                emojis_to_add[-1][1] = "".join([z for z in "_".join(last_name) if z.isalnum() or z == "_"])
            # We have a valid url or emoji here - let's make sure it's unique
            if not url[0] in [x[0] for x in emojis_to_add]:
                emojis_to_add.append([url[0],url[1]])
            # Reset last_name
            last_name = []
        if len(emojis_to_add) and last_name:
            # Update the final name if need be
            emojis_to_add[-1][1] = "".join([z for z in "_".join(last_name) if z.isalnum() or z == "_"])
        if not emojis_to_add: return await ctx.send("Usage: `{}addemoji [emoji, url, attachment] [name]`".format(ctx.prefix))
        # Now we have a list of emojis and names
        added_emojis = []
        # Clamp to the per-invocation cap and report anything dropped.
        allowed = len(emojis_to_add) if len(emojis_to_add)<=self.max_emojis else self.max_emojis
        omitted = " ({} omitted, beyond the limit of {})".format(len(emojis_to_add)-self.max_emojis,self.max_emojis) if len(emojis_to_add)>self.max_emojis else ""
        message = await ctx.send("Adding {} emoji{}{}...".format(
            allowed,
            "" if allowed==1 else "s",
            omitted))
        for emoji_to_add in emojis_to_add[:self.max_emojis]:
            # Let's try to download it
            emoji,e_name = emoji_to_add # Expand into the parts
            f = await GetImage.download(emoji)
            if not f: continue
            # Open the image file
            with open(f,"rb") as e:
                image = e.read()
            # Clean up
            GetImage.remove(f)
            # Skip names that sanitized down to nothing but underscores.
            if not e_name.replace("_",""): continue
            # Create the emoji and save it
            # NOTE(review): the bare except below silently skips failures
            # (missing permission, server emoji limit, bad image data) —
            # consider narrowing to discord.HTTPException and logging.
            try: new_emoji = await ctx.guild.create_custom_emoji(name=e_name,image=image,roles=None,reason="Added by {}#{}".format(ctx.author.name,ctx.author.discriminator))
            except: continue
            added_emojis.append(new_emoji)
        # Summarize what was actually created, with mentions for copy/paste.
        msg = "Created {} of {} emoji{}{}.".format(
            len(added_emojis),
            allowed,"" if allowed==1 else "s",
            omitted
        )
        if len(added_emojis):
            msg += "\n\n"
            emoji_text = ["{} - `:{}:`".format(self._get_emoji_mention(x),x.name) for x in added_emojis]
            msg += "\n".join(emoji_text)
        await message.edit(content=msg)

    @commands.command()
    async def emoji(self, ctx, emoji = None):
        '''Outputs the passed emoji... but bigger!'''
        if emoji is None:
            await ctx.send("Usage: `{}emoji [emoji]`".format(ctx.prefix))
            return
        # Get the emoji
        emoji_url = self._get_emoji_url(emoji)
        if not emoji_url: return await ctx.send("Usage: `{}emoji [emoji]`".format(ctx.prefix))
        f = await GetImage.download(emoji_url[0])
        if not f: return await ctx.send("I couldn't get that emoji :(")
        await ctx.send(file=discord.File(f))
        # Clean up
        GetImage.remove(f)
| 46.330579
| 173
| 0.570817
|
794cbafab4851cdb4130d3fab5d475e6da99210b
| 5,606
|
py
|
Python
|
python/rpc.py
|
necab0/vimsence
|
19d3eb901c2989519ae33052d1abe1878b21cb6b
|
[
"MIT"
] | 59
|
2018-06-22T11:39:40.000Z
|
2022-02-15T20:35:49.000Z
|
python/rpc.py
|
necab0/vimsence
|
19d3eb901c2989519ae33052d1abe1878b21cb6b
|
[
"MIT"
] | 9
|
2018-07-19T07:56:16.000Z
|
2020-10-25T23:50:24.000Z
|
python/rpc.py
|
necab0/vimsence
|
19d3eb901c2989519ae33052d1abe1878b21cb6b
|
[
"MIT"
] | 14
|
2018-07-19T03:43:26.000Z
|
2020-02-08T14:37:53.000Z
|
# References:
# * https://github.com/devsnek/discord-rpc/tree/master/src/transports/IPC.js
# * https://github.com/devsnek/discord-rpc/tree/master/example/main.js
# * https://github.com/discordapp/discord-rpc/tree/master/documentation/hard-mode.md
# * https://github.com/discordapp/discord-rpc/tree/master/src
# * https://discordapp.com/developers/docs/rich-presence/how-to#updating-presence-update-presence-payload-fields
from abc import ABCMeta, abstractmethod
import json
import logging
import os
import socket
import sys
import struct
import uuid
# Frame opcodes used by Discord's local rich-presence IPC protocol.
OP_HANDSHAKE = 0
OP_FRAME = 1
OP_CLOSE = 2
OP_PING = 3
OP_PONG = 4

logger = logging.getLogger(__name__)


class DiscordIpcError(Exception):
    # Raised for failures while talking to the local Discord IPC endpoint.
    pass
class DiscordIpcClient(metaclass=ABCMeta):
    """Work with an open Discord instance via its JSON IPC for its rich presence API.

    In a blocking way.
    Classmethod `for_platform`
    will resolve to one of WinDiscordIpcClient or UnixDiscordIpcClient,
    depending on the current platform.
    Supports context handler protocol.

    Wire format: every frame is an 8-byte little-endian header
    ``<opcode, payload_length>`` followed by a UTF-8 JSON payload.
    """

    def __init__(self, client_id):
        self.client_id = client_id
        self._connect()
        self._do_handshake()
        logger.info("connected via ID %s", client_id)

    @classmethod
    def for_platform(cls, client_id, platform=sys.platform):
        """Instantiate the transport appropriate for *platform*."""
        if platform == 'win32':
            return WinDiscordIpcClient(client_id)
        else:
            return UnixDiscordIpcClient(client_id)

    @abstractmethod
    def _connect(self):
        """Open the platform-specific transport (named pipe or unix socket)."""

    def _do_handshake(self):
        """Perform the protocol handshake; raise RuntimeError if not READY."""
        ret_op, ret_data = self.send_recv({'v': 1, 'client_id': self.client_id}, op=OP_HANDSHAKE)
        # Expected reply:
        # {'cmd': 'DISPATCH', 'data': {'v': 1, 'config': {...}}, 'evt': 'READY', 'nonce': None}
        if ret_op == OP_FRAME and ret_data['cmd'] == 'DISPATCH' and ret_data['evt'] == 'READY':
            return
        else:
            if ret_op == OP_CLOSE:
                self.close()
            raise RuntimeError(ret_data)

    @abstractmethod
    def _write(self, data: bytes):
        # Typo fix: the parameter was misspelled `date` in the original.
        """Write raw bytes to the transport."""

    @abstractmethod
    def _recv(self, size: int) -> bytes:
        """Read up to *size* bytes from the transport."""

    def _recv_header(self) -> (int, int):
        """Read one frame header and return (opcode, payload_length)."""
        header = self._recv_exactly(8)
        return struct.unpack("<II", header)

    def _recv_exactly(self, size) -> bytes:
        """Block until exactly *size* bytes have been read."""
        buf = b""
        size_remaining = size
        while size_remaining:
            chunk = self._recv(size_remaining)
            if not chunk:
                # Robustness fix: an empty read means the peer closed the
                # connection; the original looped forever in that case.
                raise DiscordIpcError("Connection closed while reading")
            buf += chunk
            size_remaining -= len(chunk)
        return buf

    def close(self):
        """Politely close the connection, attempting an OP_CLOSE frame first."""
        logger.warning("closing connection")
        try:
            self.send({}, op=OP_CLOSE)
        finally:
            # Always tear down the transport, even if the close frame fails.
            self._close()

    @abstractmethod
    def _close(self):
        """Tear down the underlying transport."""

    def __enter__(self):
        return self

    def __exit__(self, *_):
        self.close()

    def send_recv(self, data, op=OP_FRAME):
        """Send one frame and block for the next reply frame."""
        self.send(data, op)
        return self.recv()

    def send(self, data, op=OP_FRAME):
        """Serialize *data* as compact JSON and write it with a frame header."""
        logger.debug("sending %s", data)
        data_str = json.dumps(data, separators=(',', ':'))
        data_bytes = data_str.encode('utf-8')
        header = struct.pack("<II", op, len(data_bytes))
        self._write(header)
        self._write(data_bytes)

    def recv(self) -> (int, "JSON"):
        """Receives a packet from discord.

        Returns op code and payload.
        """
        op, length = self._recv_header()
        payload = self._recv_exactly(length)
        data = json.loads(payload.decode('utf-8'))
        logger.debug("received %s", data)
        return op, data

    def set_activity(self, act):
        """Send a SET_ACTIVITY command carrying this process's pid and *act*."""
        data = {
            'cmd': 'SET_ACTIVITY',
            'args': {'pid': os.getpid(),
                     'activity': act},
            'nonce': str(uuid.uuid4())  # unique request id per the RPC protocol
        }
        self.send(data)
class WinDiscordIpcClient(DiscordIpcClient):
    """Windows transport: Discord listens on named pipes discord-ipc-0..9."""

    _pipe_pattern = R'\\?\pipe\discord-ipc-{}'

    def _connect(self):
        """Try pipe indices 0-9 and keep the first that opens."""
        for i in range(10):
            path = self._pipe_pattern.format(i)
            try:
                self._f = open(path, "w+b")
            except OSError as e:
                logger.error("failed to open {!r}: {}".format(path, e))
            else:
                break
        else:
            # Bug fix: the original *returned* the exception instead of
            # raising it, so a failed connection went unnoticed until the
            # handshake crashed with an unrelated AttributeError.
            raise DiscordIpcError("Failed to connect to Discord pipe")
        self.path = path

    def _write(self, data: bytes):
        self._f.write(data)
        self._f.flush()  # pipe writes are buffered; push the frame out now

    def _recv(self, size: int) -> bytes:
        return self._f.read(size)

    def _close(self):
        self._f.close()
class UnixDiscordIpcClient(DiscordIpcClient):
    """Unix transport: Discord listens on discord-ipc-0..9 sockets in the
    runtime/tmp directory."""

    def _connect(self):
        """Try socket indices 0-9 and keep the first that accepts."""
        self._sock = socket.socket(socket.AF_UNIX)
        pipe_pattern = self._get_pipe_pattern()
        for i in range(10):
            path = pipe_pattern.format(i)
            if not os.path.exists(path):
                continue
            try:
                self._sock.connect(path)
            except OSError:
                # Socket exists but refused the connection; try the next one.
                continue
            else:
                break
        else:
            # Bug fix: the original *returned* the exception instead of
            # raising it, leaving the client half-initialized on failure.
            raise DiscordIpcError("Failed to connect to Discord pipe")

    @staticmethod
    def _get_pipe_pattern():
        """Return the socket path template, honoring the usual tmp-dir env vars."""
        env_keys = ('XDG_RUNTIME_DIR', 'TMPDIR', 'TMP', 'TEMP')
        for env_key in env_keys:
            dir_path = os.environ.get(env_key)
            if dir_path:
                break
        else:
            dir_path = '/tmp'
        return os.path.join(dir_path, 'discord-ipc-{}')

    def _write(self, data: bytes):
        self._sock.sendall(data)

    def _recv(self, size: int) -> bytes:
        return self._sock.recv(size)

    def _close(self):
        self._sock.close()
| 26.951923
| 112
| 0.584909
|
794cbc658286d28b91915f5e5a2f3faa03478373
| 1,500
|
py
|
Python
|
kea/utils/_signal_assigner.py
|
SmartAcoustics/Kea
|
5790f18dafccfc01fe9dbe98de5bb1a5ce584c56
|
[
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | 3
|
2020-02-28T13:03:59.000Z
|
2020-09-20T06:33:04.000Z
|
kea/utils/_signal_assigner.py
|
SmartAcoustics/Kea
|
5790f18dafccfc01fe9dbe98de5bb1a5ce584c56
|
[
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | null | null | null |
kea/utils/_signal_assigner.py
|
SmartAcoustics/Kea
|
5790f18dafccfc01fe9dbe98de5bb1a5ce584c56
|
[
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | 3
|
2018-12-17T16:33:08.000Z
|
2020-01-21T14:10:25.000Z
|
from myhdl import *
@block
def boolean_signal_assigner(signal_in, signal_out):
    ''' Drive signal_out directly from signal_in (single-bit passthrough). '''

    @always_comb
    def passthrough():
        signal_out.next = signal_in

    return passthrough
@block
def intbv_signal_assigner(signal_in, signal_out, offset):
    ''' Drive signal_out with signal_in placed at bit position offset. '''

    @always_comb
    def shifted_drive():
        signal_out.next[:offset] = signal_in

    return shifted_drive
@block
def intbv_to_signed_signal_assigner(signal_in, signal_out, offset):
    ''' Drive signal_out with the signed interpretation of signal_in, placed
    at bit position offset. '''

    @always_comb
    def signed_shifted_drive():
        signal_out.next[:offset] = signal_in.signed()

    return signed_shifted_drive
@block
def signal_assigner(signal_in, signal_out, offset=0, convert_to_signed=False):
    ''' Assigns the signal_in to the signal_out shifted by the offset value.

    If convert_to_signed is true, then this block will convert the input to a
    signed value as part of the assignment.

    Raises ValueError if signal_out is too narrow to hold signal_in shifted
    by offset.
    '''
    if len(signal_out) < len(signal_in) + offset:
        # Typo fix: the error message previously read "accomodate".
        raise ValueError(
            'signal_out must be wide enough to accommodate the signal_in '
            'shifted by offset.')

    if len(signal_out) == 1:
        # A 1-bit output cannot be bit-sliced, so use the boolean passthrough.
        return boolean_signal_assigner(signal_in, signal_out)
    else:
        if convert_to_signed:
            return (
                intbv_to_signed_signal_assigner(
                    signal_in, signal_out, offset))
        else:
            return intbv_signal_assigner(signal_in, signal_out, offset)
| 26.785714
| 78
| 0.674667
|
794cbced65e9844fb5a988e06c46fbfd59ae4e4b
| 492
|
py
|
Python
|
create_lesson_plan/migrations/0006_lesson_score.py
|
rishabhranawat/CrowdPlatform
|
1de2ad7e70fbf6cbf2e29bc9368341134b4f7e0d
|
[
"MIT"
] | 1
|
2020-07-23T21:35:40.000Z
|
2020-07-23T21:35:40.000Z
|
create_lesson_plan/migrations/0006_lesson_score.py
|
rishabhranawat/CrowdPlatform
|
1de2ad7e70fbf6cbf2e29bc9368341134b4f7e0d
|
[
"MIT"
] | 9
|
2021-02-08T20:32:35.000Z
|
2022-03-02T14:58:07.000Z
|
create_lesson_plan/migrations/0006_lesson_score.py
|
rishabhranawat/CrowdPlatform
|
1de2ad7e70fbf6cbf2e29bc9368341134b4f7e0d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-03-22 15:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds a non-null integer `score`
    # column to the `lesson` table, backfilling existing rows with 0.

    dependencies = [
        ('create_lesson_plan', '0005_auto_20170317_1709'),
    ]

    operations = [
        migrations.AddField(
            model_name='lesson',
            name='score',
            field=models.IntegerField(default=0),
            # default=0 only populates existing rows during the migration;
            # it is not retained as a model-level default.
            preserve_default=False,
        ),
    ]
| 22.363636
| 58
| 0.619919
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.